from typing import Callable, List
from canary.api import LOCATION_MODE_AWAY, LOCATION_MODE_HOME, LOCATION_MODE_NIGHT
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DATA_COORDINATOR, DOMAIN
from .coordinator import CanaryDataUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Canary alarm control panels based on a config entry."""
coordinator: CanaryDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
alarms = [
CanaryAlarm(coordinator, location)
for location_id, location in coordinator.data["locations"].items()
]
async_add_entities(alarms, True)
class CanaryAlarm(CoordinatorEntity, AlarmControlPanelEntity):
"""Representation of a Canary alarm control panel."""
def __init__(self, coordinator, location):
"""Initialize a Canary security camera."""
super().__init__(coordinator)
self._location_id = location.location_id
self._location_name = location.name
@property
def location(self):
"""Return information about the location."""
return self.coordinator.data["locations"][self._location_id]
@property
def name(self):
"""Return the name of the alarm."""
return self._location_name
@property
def unique_id(self):
"""Return the unique ID of the alarm."""
return str(self._location_id)
@property
def state(self):
"""Return the state of the device."""
if self.location.is_private:
return STATE_ALARM_DISARMED
mode = self.location.mode
if mode.name == LOCATION_MODE_AWAY:
return STATE_ALARM_ARMED_AWAY
if mode.name == LOCATION_MODE_HOME:
return STATE_ALARM_ARMED_HOME
if mode.name == LOCATION_MODE_NIGHT:
return STATE_ALARM_ARMED_NIGHT
return None
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"private": self.location.is_private}
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.coordinator.canary.set_location_mode(
self._location_id, self.location.mode.name, True
)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self.coordinator.canary.set_location_mode(self._location_id, LOCATION_MODE_HOME)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self.coordinator.canary.set_location_mode(self._location_id, LOCATION_MODE_AWAY)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
self.coordinator.canary.set_location_mode(
self._location_id, LOCATION_MODE_NIGHT
)
|
import os
from flask import Flask, render_template, request, redirect, jsonify
from react.render import render_component
DEBUG = True
app = Flask(__name__)
app.debug = DEBUG
comments = []
components_path = os.path.join(os.path.dirname(__file__), 'src')
def path(js_file):
return os.path.join(components_path, js_file)
@app.route('/')
def index():
store = {'component': 'CommentBox.jsx'}
store['props'] = {'comments': comments}
rendered = render_component(
os.path.join(os.getcwd(), 'static', 'js', path(store['component'])),
{
'comments': comments,
'url': '/comment/',
},
to_static_markup=True,
)
return render_template('index.html',
rendered=rendered,
store=store)
@app.route('/comment/', methods=('POST',))
def comment():
comments.append({
'author': request.form['author'],
'text': request.form['text'],
})
return jsonify({'comments': comments})
@app.route('/clear/')
def clear():
    # Clear the module-level list in place; rebinding a new local list here
    # would leave the global comments untouched.
    del comments[:]
return jsonify({'comments': comments})
if __name__ == '__main__':
app.run()
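# A minimal sketch of exercising the endpoints above with the `requests`
# library (an assumption for illustration; any HTTP client works), assuming
# the app is running locally on Flask's default port 5000:
#
#   import requests
#   requests.post('http://localhost:5000/comment/',
#                 data={'author': 'alice', 'text': 'hello'})
#   print(requests.get('http://localhost:5000/clear/').json())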
|
from __future__ import print_function
from scattertext.WhitespaceNLP import whitespace_nlp_with_sentences
from scattertext import FeatsFromGeneralInquirer, produce_scattertext_explorer
from scattertext import SampleCorpora
from scattertext.CorpusFromPandas import CorpusFromPandas
def main():
convention_df = SampleCorpora.ConventionData2012.get_data()
feat_builder = FeatsFromGeneralInquirer()
corpus = CorpusFromPandas(convention_df,
category_col='party',
text_col='text',
nlp=whitespace_nlp_with_sentences,
feats_from_spacy_doc=feat_builder).build()
html = produce_scattertext_explorer(corpus,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
width_in_pixels=1000,
metadata=convention_df['speaker'],
use_non_text_features=True,
use_full_doc=True,
topic_model_term_lists=feat_builder.get_top_model_term_lists(),
metadata_descriptions=feat_builder.get_definitions()
)
open('./demo_general_inquirer.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_general_inquirer.html in Chrome or Firefox.')
if __name__ == '__main__':
main()
|
import os
import socket
from collections import defaultdict
from contextlib import contextmanager
from queue import Empty
from kombu.exceptions import ChannelError
from kombu.log import get_logger
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from . import virtual
try:
import etcd
except ImportError:
etcd = None
logger = get_logger('kombu.transport.etcd')
DEFAULT_PORT = 2379
DEFAULT_HOST = 'localhost'
class Channel(virtual.Channel):
"""Etcd Channel class which talks to the Etcd."""
prefix = 'kombu'
index = None
timeout = 10
session_ttl = 30
lock_ttl = 10
def __init__(self, *args, **kwargs):
if etcd is None:
raise ImportError('Missing python-etcd library')
super().__init__(*args, **kwargs)
port = self.connection.client.port or self.connection.default_port
host = self.connection.client.hostname or DEFAULT_HOST
logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout)
self.queues = defaultdict(dict)
self.client = etcd.Client(host=host, port=int(port))
def _key_prefix(self, queue):
"""Create and return the `queue` with the proper prefix.
Arguments:
queue (str): The name of the queue.
"""
return f'{self.prefix}/{queue}'
@contextmanager
def _queue_lock(self, queue):
"""Try to acquire a lock on the Queue.
        It does so by creating an object called 'lock' which is locked by the
        current session.
This way other nodes are not able to write to the lock object which
means that they have to wait before the lock is released.
Arguments:
queue (str): The name of the queue.
"""
lock = etcd.Lock(self.client, queue)
lock._uuid = self.lock_value
logger.debug(f'Acquiring lock {lock.name}')
lock.acquire(blocking=True, lock_ttl=self.lock_ttl)
try:
yield
finally:
logger.debug(f'Releasing lock {lock.name}')
lock.release()
def _new_queue(self, queue, **_):
"""Create a new `queue` if the `queue` doesn't already exist.
Arguments:
queue (str): The name of the queue.
"""
self.queues[queue] = queue
with self._queue_lock(queue):
try:
return self.client.write(
key=self._key_prefix(queue), dir=True, value=None)
except etcd.EtcdNotFile:
logger.debug(f'Queue "{queue}" already exists')
return self.client.read(key=self._key_prefix(queue))
def _has_queue(self, queue, **kwargs):
"""Verify that queue exists.
Returns:
bool: Should return :const:`True` if the queue exists
or :const:`False` otherwise.
"""
try:
self.client.read(self._key_prefix(queue))
return True
except etcd.EtcdKeyNotFound:
return False
def _delete(self, queue, *args, **_):
"""Delete a `queue`.
Arguments:
queue (str): The name of the queue.
"""
self.queues.pop(queue, None)
self._purge(queue)
def _put(self, queue, payload, **_):
"""Put `message` onto `queue`.
This simply writes a key to the Etcd store
Arguments:
queue (str): The name of the queue.
payload (dict): Message data which will be dumped to etcd.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
if not self.client.write(
key=key,
value=dumps(payload),
append=True):
raise ChannelError(f'Cannot add key {key!r} to etcd')
def _get(self, queue, timeout=None):
"""Get the first available message from the queue.
Before it does so it acquires a lock on the store so
only one node reads at the same time. This is for read consistency
Arguments:
queue (str): The name of the queue.
timeout (int): Optional seconds to wait for a response.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
logger.debug('Fetching key %s with index %s', key, self.index)
try:
result = self.client.read(
key=key, recursive=True,
index=self.index, timeout=self.timeout)
if result is None:
raise Empty()
item = result._children[-1]
logger.debug('Removing key {}'.format(item['key']))
msg_content = loads(item['value'])
self.client.delete(key=item['key'])
return msg_content
except (TypeError, IndexError, etcd.EtcdException) as error:
logger.debug('_get failed: {}:{}'.format(type(error), error))
raise Empty()
def _purge(self, queue):
"""Remove all `message`s from a `queue`.
Arguments:
queue (str): The name of the queue.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
logger.debug(f'Purging queue at key {key}')
return self.client.delete(key=key, recursive=True)
def _size(self, queue):
"""Return the size of the `queue`.
Arguments:
queue (str): The name of the queue.
"""
with self._queue_lock(queue):
size = 0
try:
key = self._key_prefix(queue)
logger.debug('Fetching key recursively %s with index %s',
key, self.index)
result = self.client.read(
key=key, recursive=True,
index=self.index)
size = len(result._children)
except TypeError:
pass
logger.debug('Found %s keys under %s with index %s',
size, key, self.index)
return size
@cached_property
def lock_value(self):
return f'{socket.gethostname()}.{os.getpid()}'
class Transport(virtual.Transport):
"""Etcd storage Transport for Kombu."""
Channel = Channel
default_port = DEFAULT_PORT
driver_type = 'etcd'
driver_name = 'python-etcd'
polling_interval = 3
implements = virtual.Transport.implements.extend(
exchange_type=frozenset(['direct']))
def __init__(self, *args, **kwargs):
"""Create a new instance of etcd.Transport."""
if etcd is None:
raise ImportError('Missing python-etcd library')
super().__init__(*args, **kwargs)
self.connection_errors = (
virtual.Transport.connection_errors + (etcd.EtcdException, )
)
self.channel_errors = (
virtual.Transport.channel_errors + (etcd.EtcdException, )
)
def verify_connection(self, connection):
"""Verify the connection works."""
port = connection.client.port or self.default_port
host = connection.client.hostname or DEFAULT_HOST
logger.debug('Verify Etcd connection to %s:%s', host, port)
try:
etcd.Client(host=host, port=int(port))
return True
except ValueError:
pass
return False
def driver_version(self):
"""Return the version of the etcd library.
.. note::
python-etcd has no __version__. This is a workaround.
"""
try:
import pip.commands.freeze
for x in pip.commands.freeze.freeze():
if x.startswith('python-etcd'):
return x.split('==')[1]
except (ImportError, IndexError):
logger.warning('Unable to find the python-etcd version.')
return 'Unknown'
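# A minimal sketch of using this transport through Kombu, assuming an etcd v2
# server on localhost:2379 and that this module is registered under the
# 'etcd' transport alias (the connection URL shown is an assumption):
#
#   from kombu import Connection
#
#   with Connection('etcd://localhost:2379/') as conn:
#       queue = conn.SimpleQueue('kombu-demo')
#       queue.put({'hello': 'world'})
#       print(queue.get(timeout=5).payload)
#       queue.close()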
|
from pycfdns.exceptions import CloudflareConnectionException
from homeassistant.components.cloudflare.const import DOMAIN, SERVICE_UPDATE_RECORDS
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from . import ENTRY_CONFIG, init_integration
from tests.common import MockConfigEntry
async def test_unload_entry(hass, cfupdate):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_async_setup_raises_entry_not_ready(hass, cfupdate):
"""Test that it throws ConfigEntryNotReady when exception occurs during setup."""
instance = cfupdate.return_value
entry = MockConfigEntry(domain=DOMAIN, data=ENTRY_CONFIG)
entry.add_to_hass(hass)
instance.get_zone_id.side_effect = CloudflareConnectionException()
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_integration_services(hass, cfupdate):
"""Test integration services."""
instance = cfupdate.return_value
entry = await init_integration(hass)
assert entry.state == ENTRY_STATE_LOADED
await hass.services.async_call(
DOMAIN,
SERVICE_UPDATE_RECORDS,
{},
blocking=True,
)
await hass.async_block_till_done()
instance.update_records.assert_called_once()
|
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import get_coordinator
from .const import ATTRIBUTION, OPTION_WORLDWIDE
SENSORS = {
"confirmed": "mdi:emoticon-neutral-outline",
"current": "mdi:emoticon-sad-outline",
"recovered": "mdi:emoticon-happy-outline",
"deaths": "mdi:emoticon-cry-outline",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Defer sensor setup to the shared sensor module."""
coordinator = await get_coordinator(hass)
async_add_entities(
CoronavirusSensor(coordinator, config_entry.data["country"], info_type)
for info_type in SENSORS
)
class CoronavirusSensor(CoordinatorEntity):
"""Sensor representing corona virus data."""
name = None
unique_id = None
def __init__(self, coordinator, country, info_type):
"""Initialize coronavirus sensor."""
super().__init__(coordinator)
if country == OPTION_WORLDWIDE:
self.name = f"Worldwide Coronavirus {info_type}"
else:
self.name = f"{coordinator.data[country].country} Coronavirus {info_type}"
self.unique_id = f"{country}-{info_type}"
self.country = country
self.info_type = info_type
@property
def available(self):
"""Return if sensor is available."""
return self.coordinator.last_update_success and (
self.country in self.coordinator.data or self.country == OPTION_WORLDWIDE
)
@property
def state(self):
"""State of the sensor."""
if self.country == OPTION_WORLDWIDE:
sum_cases = 0
for case in self.coordinator.data.values():
value = getattr(case, self.info_type)
if value is None:
continue
sum_cases += value
return sum_cases
return getattr(self.coordinator.data[self.country], self.info_type)
@property
def icon(self):
"""Return the icon."""
return SENSORS[self.info_type]
@property
def unit_of_measurement(self):
"""Return unit of measurement."""
return "people"
@property
def device_state_attributes(self):
"""Return device attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
|
from datetime import datetime
from eebrightbox import EEBrightBoxException
import pytest
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_PLATFORM
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
def _configure_mock_get_devices(eebrightbox_mock):
eebrightbox_instance = eebrightbox_mock.return_value
eebrightbox_instance.__enter__.return_value = eebrightbox_instance
eebrightbox_instance.get_devices.return_value = [
{
"mac": "AA:BB:CC:DD:EE:FF",
"ip": "192.168.1.10",
"hostname": "hostnameAA",
"activity_ip": True,
"port": "eth0",
"time_last_active": datetime(2019, 1, 20, 16, 4, 0),
},
{
"mac": "11:22:33:44:55:66",
"hostname": "hostname11",
"ip": "192.168.1.11",
"activity_ip": True,
"port": "wl0",
"time_last_active": datetime(2019, 1, 20, 11, 9, 0),
},
{
"mac": "FF:FF:FF:FF:FF:FF",
"hostname": "hostnameFF",
"ip": "192.168.1.12",
"activity_ip": False,
"port": "wl1",
"time_last_active": datetime(2019, 1, 15, 16, 9, 0),
},
]
def _configure_mock_failed_config_check(eebrightbox_mock):
eebrightbox_instance = eebrightbox_mock.return_value
eebrightbox_instance.__enter__.side_effect = EEBrightBoxException(
"Failed to connect to the router"
)
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
pass
@patch("homeassistant.components.ee_brightbox.device_tracker.EEBrightBox")
async def test_missing_credentials(eebrightbox_mock, hass):
"""Test missing credentials."""
_configure_mock_get_devices(eebrightbox_mock)
result = await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "ee_brightbox"}}
)
assert result
await hass.async_block_till_done()
assert hass.states.get("device_tracker.hostnameaa") is None
assert hass.states.get("device_tracker.hostname11") is None
assert hass.states.get("device_tracker.hostnameff") is None
@patch("homeassistant.components.ee_brightbox.device_tracker.EEBrightBox")
async def test_invalid_credentials(eebrightbox_mock, hass):
"""Test invalid credentials."""
_configure_mock_failed_config_check(eebrightbox_mock)
result = await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "ee_brightbox", CONF_PASSWORD: "test_password"}},
)
assert result
await hass.async_block_till_done()
assert hass.states.get("device_tracker.hostnameaa") is None
assert hass.states.get("device_tracker.hostname11") is None
assert hass.states.get("device_tracker.hostnameff") is None
@patch("homeassistant.components.ee_brightbox.device_tracker.EEBrightBox")
async def test_get_devices(eebrightbox_mock, hass):
"""Test valid configuration."""
_configure_mock_get_devices(eebrightbox_mock)
result = await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "ee_brightbox", CONF_PASSWORD: "test_password"}},
)
assert result
await hass.async_block_till_done()
assert hass.states.get("device_tracker.hostnameaa") is not None
assert hass.states.get("device_tracker.hostname11") is not None
assert hass.states.get("device_tracker.hostnameff") is None
state = hass.states.get("device_tracker.hostnameaa")
assert state.attributes["mac"] == "AA:BB:CC:DD:EE:FF"
assert state.attributes["ip"] == "192.168.1.10"
assert state.attributes["port"] == "eth0"
assert state.attributes["last_active"] == datetime(2019, 1, 20, 16, 4, 0)
|
from functools import partial
import pytest
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
async def test_simple_function(hass):
"""Test simple function (executor)."""
calls = []
def test_funct(data):
"""Test function."""
calls.append(data)
async_dispatcher_connect(hass, "test", test_funct)
async_dispatcher_send(hass, "test", 3)
await hass.async_block_till_done()
assert calls == [3]
async_dispatcher_send(hass, "test", "bla")
await hass.async_block_till_done()
assert calls == [3, "bla"]
async def test_simple_function_unsub(hass):
"""Test simple function (executor) and unsub."""
calls1 = []
calls2 = []
def test_funct1(data):
"""Test function."""
calls1.append(data)
def test_funct2(data):
"""Test function."""
calls2.append(data)
async_dispatcher_connect(hass, "test1", test_funct1)
unsub = async_dispatcher_connect(hass, "test2", test_funct2)
async_dispatcher_send(hass, "test1", 3)
async_dispatcher_send(hass, "test2", 4)
await hass.async_block_till_done()
assert calls1 == [3]
assert calls2 == [4]
unsub()
async_dispatcher_send(hass, "test1", 5)
async_dispatcher_send(hass, "test2", 6)
await hass.async_block_till_done()
assert calls1 == [3, 5]
assert calls2 == [4]
# check don't kill the flow
unsub()
async_dispatcher_send(hass, "test1", 7)
async_dispatcher_send(hass, "test2", 8)
await hass.async_block_till_done()
assert calls1 == [3, 5, 7]
assert calls2 == [4]
async def test_simple_callback(hass):
"""Test simple callback (async)."""
calls = []
@callback
def test_funct(data):
"""Test function."""
calls.append(data)
async_dispatcher_connect(hass, "test", test_funct)
async_dispatcher_send(hass, "test", 3)
await hass.async_block_till_done()
assert calls == [3]
async_dispatcher_send(hass, "test", "bla")
await hass.async_block_till_done()
assert calls == [3, "bla"]
async def test_simple_coro(hass):
"""Test simple coro (async)."""
calls = []
async def async_test_funct(data):
"""Test function."""
calls.append(data)
async_dispatcher_connect(hass, "test", async_test_funct)
async_dispatcher_send(hass, "test", 3)
await hass.async_block_till_done()
assert calls == [3]
async_dispatcher_send(hass, "test", "bla")
await hass.async_block_till_done()
assert calls == [3, "bla"]
async def test_simple_function_multiargs(hass):
"""Test simple function (executor)."""
calls = []
def test_funct(data1, data2, data3):
"""Test function."""
calls.append(data1)
calls.append(data2)
calls.append(data3)
async_dispatcher_connect(hass, "test", test_funct)
async_dispatcher_send(hass, "test", 3, 2, "bla")
await hass.async_block_till_done()
assert calls == [3, 2, "bla"]
@pytest.mark.no_fail_on_log_exception
async def test_callback_exception_gets_logged(hass, caplog):
"""Test exception raised by signal handler."""
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception("This is a bad message callback")
# wrap in partial to test message logging.
async_dispatcher_connect(hass, "test", partial(bad_handler))
async_dispatcher_send(hass, "test", "bad")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert (
f"Exception in functools.partial({bad_handler}) when dispatching 'test': ('bad',)"
in caplog.text
)
|
import os
import socket
from queue import Empty
from kombu.utils.encoding import bytes_to_str, ensure_bytes
from kombu.utils.json import dumps, loads
from . import virtual
try:
import kazoo
from kazoo.client import KazooClient
from kazoo.recipe.queue import Queue
KZ_CONNECTION_ERRORS = (
kazoo.exceptions.SystemErrorException,
kazoo.exceptions.ConnectionLossException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.InvalidACLException,
kazoo.exceptions.AuthFailedException,
kazoo.exceptions.SessionExpiredException,
)
KZ_CHANNEL_ERRORS = (
kazoo.exceptions.RuntimeInconsistencyException,
kazoo.exceptions.DataInconsistencyException,
kazoo.exceptions.BadArgumentsException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.ApiErrorException,
kazoo.exceptions.NoNodeException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.NodeExistsException,
kazoo.exceptions.NoChildrenForEphemeralsException,
kazoo.exceptions.NotEmptyException,
kazoo.exceptions.SessionExpiredException,
kazoo.exceptions.InvalidCallbackException,
socket.error,
)
except ImportError:
kazoo = None # noqa
KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () # noqa
DEFAULT_PORT = 2181
__author__ = 'Mahendra M <[email protected]>'
class Channel(virtual.Channel):
"""Zookeeper Channel."""
_client = None
_queues = {}
def __init__(self, connection, **kwargs):
super().__init__(connection, **kwargs)
vhost = self.connection.client.virtual_host
self._vhost = '/{}'.format(vhost.strip('/'))
def _get_path(self, queue_name):
return os.path.join(self._vhost, queue_name)
def _get_queue(self, queue_name):
queue = self._queues.get(queue_name, None)
if queue is None:
queue = Queue(self.client, self._get_path(queue_name))
self._queues[queue_name] = queue
# Ensure that the queue is created
len(queue)
return queue
def _put(self, queue, message, **kwargs):
return self._get_queue(queue).put(
ensure_bytes(dumps(message)),
priority=self._get_message_priority(message, reverse=True),
)
def _get(self, queue):
queue = self._get_queue(queue)
msg = queue.get()
if msg is None:
raise Empty()
return loads(bytes_to_str(msg))
def _purge(self, queue):
count = 0
queue = self._get_queue(queue)
while True:
msg = queue.get()
if msg is None:
break
count += 1
return count
def _delete(self, queue, *args, **kwargs):
if self._has_queue(queue):
self._purge(queue)
self.client.delete(self._get_path(queue))
def _size(self, queue):
queue = self._get_queue(queue)
return len(queue)
def _new_queue(self, queue, **kwargs):
if not self._has_queue(queue):
queue = self._get_queue(queue)
def _has_queue(self, queue):
return self.client.exists(self._get_path(queue)) is not None
def _open(self):
conninfo = self.connection.client
hosts = []
if conninfo.alt:
for host_port in conninfo.alt:
if host_port.startswith('zookeeper://'):
host_port = host_port[len('zookeeper://'):]
if not host_port:
continue
try:
host, port = host_port.split(':', 1)
host_port = (host, int(port))
except ValueError:
if host_port == conninfo.hostname:
host_port = (host_port, conninfo.port or DEFAULT_PORT)
else:
host_port = (host_port, DEFAULT_PORT)
hosts.append(host_port)
host_port = (conninfo.hostname, conninfo.port or DEFAULT_PORT)
if host_port not in hosts:
hosts.insert(0, host_port)
conn_str = ','.join([f'{h}:{p}' for h, p in hosts])
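        # conn_str takes the form 'host1:2181,host2:2181' (illustrative hosts)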
conn = KazooClient(conn_str)
conn.start()
return conn
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
"""Zookeeper Transport."""
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (
virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS
)
channel_errors = (
virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS
)
driver_type = 'zookeeper'
driver_name = 'kazoo'
def __init__(self, *args, **kwargs):
if kazoo is None:
raise ImportError('The kazoo library is not installed')
super().__init__(*args, **kwargs)
def driver_version(self):
return kazoo.__version__
|
import base64
import json
import logging
import threading
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.alicloud import ali_disk
from perfkitbenchmarker.providers.alicloud import ali_network
from perfkitbenchmarker.providers.alicloud import util
import six
FLAGS = flags.FLAGS
NON_HVM_PREFIXES = ['t1', 's1', 's2', 's3', 'm1']
DRIVE_START_LETTER = 'b'
DEFAULT_DISK_SIZE = 500
INSTANCE = 'instance'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
DISK = 'disk'
NONE = 'none'
IO_OPTIMIZED = 'io_optimized'
RESOURCE_TYPE = {
INSTANCE: 'instance',
IMAGE: 'image',
SNAPSHOT: 'snapshot',
DISK: 'disk',
}
SSH_PORT = 22
NUM_LOCAL_VOLUMES = {
'ecs.t1.small': 4,
'ecs.s1.small': 4,
'ecs.s1.medium': 4,
'ecs.s2.small': 4,
'ecs.s2.large': 4,
'ecs.s2.xlarge': 4,
'ecs.s3.medium': 4,
'ecs.s3.large': 4,
'ecs.m1.medium': 4,
}
INSTANCE_EXISTS_STATUSES = frozenset(
['Starting', 'Running', 'Stopping', 'Stopped'])
INSTANCE_DELETED_STATUSES = frozenset([])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
class AliVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AliCloud Virtual Machine."""
CLOUD = providers.ALICLOUD
DEFAULT_ZONE = 'cn-hangzhou-d'
DEFAULT_MACHINE_TYPE = 'ecs.s3.large'
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
def __init__(self, vm_spec):
"""Initialize a AliCloud virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the VM.
"""
super(AliVirtualMachine, self).__init__(vm_spec)
self.image = FLAGS.image
self.user_name = FLAGS.ali_user_name
self.key_pair_name = None
self.region = util.GetRegionByZone(self.zone)
self.bandwidth_in = FLAGS.ali_bandwidth_in
self.bandwidth_out = FLAGS.ali_bandwidth_out
self.scratch_disk_size = FLAGS.scratch_disk_size or DEFAULT_DISK_SIZE
self.system_disk_type = FLAGS.ali_system_disk_type
self.system_disk_size = FLAGS.ali_system_disk_size
self.eip_address_bandwidth = FLAGS.ali_eip_address_bandwidth
self.network = ali_network.AliNetwork.GetNetwork(self)
self.firewall = ali_network.AliFirewall.GetFirewall()
@vm_util.Retry(poll_interval=1, log_errors=False)
def _WaitForInstanceStatus(self, status_list):
"""Waits until the instance's status is in status_list"""
logging.info('Waits until the instance\'s status is one of statuses: %s',
status_list)
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % self.id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instances = response['Instances']['Instance']
assert len(instances) == 1
status = instances[0]['Status']
assert status in status_list
@vm_util.Retry(poll_interval=5, max_retries=30, log_errors=False)
def _WaitForEipStatus(self, status_list):
"""Waits until the instance's status is in status_list"""
logging.info('Waits until the eip\'s status is one of statuses: %s',
status_list)
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeEipAddresses',
'--RegionId %s' % self.region,
'--AllocationId %s' % self.eip_id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
EipAddresses = response['EipAddresses']['EipAddress']
assert len(EipAddresses) == 1
status = EipAddresses[0]['Status']
assert status in status_list
def _AllocatePubIp(self, region, instance_id):
"""Allocate a public ip address and associate it to the instance"""
if FLAGS.ali_use_vpc:
allocatip_cmd = util.ALI_PREFIX + [
'ecs',
'AllocateEipAddress',
'--RegionId %s' % region,
'--InternetChargeType PayByTraffic',
'--Bandwidth %s' % self.eip_address_bandwidth]
allocatip_cmd = util.GetEncodedCmd(allocatip_cmd)
stdout, _ = vm_util.IssueRetryableCommand(allocatip_cmd)
response = json.loads(stdout)
self.ip_address = response['EipAddress']
self.eip_id = response['AllocationId']
self._WaitForInstanceStatus(['Stopped', 'Running'])
associate_cmd = util.ALI_PREFIX + [
'ecs',
'AssociateEipAddress',
'--RegionId %s' % region,
'--AllocationId %s' % self.eip_id,
'--InstanceId %s' % instance_id,
'--InstanceType EcsInstance']
associate_cmd = util.GetEncodedCmd(associate_cmd)
vm_util.IssueRetryableCommand(associate_cmd)
else:
allocatip_cmd = util.ALI_PREFIX + [
'ecs',
'AllocatePublicIpAddress',
'--RegionId %s' % region,
'--InstanceId %s' % instance_id]
allocatip_cmd = util.GetEncodedCmd(allocatip_cmd)
stdout, _ = vm_util.IssueRetryableCommand(allocatip_cmd)
response = json.loads(stdout)
self.ip_address = response['IpAddress']
@classmethod
def _GetDefaultImage(cls, region):
"""Returns the default image given the machine type and region.
If no default is configured, this will return None.
"""
if cls.IMAGE_NAME_FILTER is None:
return None
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeImages',
'--RegionId %s' % region,
'--ImageName \'%s\'' % cls.IMAGE_NAME_FILTER]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
if not stdout:
return None
images = json.loads(stdout)['Images']['Image']
# We want to return the latest version of the image, and since the wildcard
# portion of the image name is the image's creation date, we can just take
# the image with the 'largest' name.
return max(images, key=lambda image: image['ImageName'])['ImageId']
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Instances']['Instance'][0]
if self.network.use_vpc:
pub_ip_address = instance['EipAddress']['IpAddress']
self.internal_ip = \
instance['VpcAttributes']['PrivateIpAddress']['IpAddress'][0]
else:
pub_ip_address = instance['PublicIpAddress']['IpAddress'][0]
self.internal_ip = instance['InnerIpAddress']['IpAddress'][0]
assert self.ip_address == pub_ip_address
self.group_id = instance['SecurityGroupIds']['SecurityGroupId'][0]
self._WaitForInstanceStatus(['Running'])
self.firewall.AllowPort(self, SSH_PORT)
tags = {}
tags.update(self.vm_metadata)
util.AddTags(self.id, RESOURCE_TYPE[INSTANCE], self.region, **tags)
util.AddDefaultTags(self.id, RESOURCE_TYPE[INSTANCE], self.region)
def _CreateDependencies(self):
"""Create VM dependencies."""
self.key_pair_name = AliCloudKeyFileManager.ImportKeyfile(self.region)
def _DeleteDependencies(self):
"""Delete VM dependencies."""
if self.key_pair_name:
AliCloudKeyFileManager.DeleteKeyfile(self.region, self.key_pair_name)
def _Create(self):
"""Create a VM instance."""
if self.image is None:
      # This is here and not in the __init__ method because _GetDefaultImage
# does a nontrivial amount of work (it calls the aliyuncli).
self.image = self._GetDefaultImage(self.region)
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateInstance',
'--InstanceName perfkit-%s' % FLAGS.run_uri,
'--RegionId %s' % self.region,
'--ZoneId %s' % self.zone,
'--ImageId %s' % self.image,
'--InstanceType %s' % self.machine_type,
'--SecurityGroupId %s' % self.network.security_group.group_id,
'--KeyPairName %s' % self.key_pair_name,
'--SystemDisk.Category %s' % self.system_disk_type,
'--SystemDisk.Size %s' % self.system_disk_size]
if FLAGS.scratch_disk_type == disk.LOCAL:
disk_cmd = [
'--DataDisk1Category ephemeral_ssd',
'--DataDisk1Size %s' % self.scratch_disk_size,
'--DataDisk1Device %s%s' % (util.GetDrivePathPrefix(),
DRIVE_START_LETTER)]
create_cmd.extend(disk_cmd)
if FLAGS.ali_io_optimized is not None:
create_cmd.extend(['--IoOptimized optimized'])
if FLAGS.ali_use_vpc:
create_cmd.extend(['--VSwitchId %s' % self.network.vswitch.id])
else:
create_cmd.extend([
'--InternetChargeType PayByTraffic',
'--InternetMaxBandwidthIn %s' % self.bandwidth_in,
'--InternetMaxBandwidthOut %s' % self.bandwidth_out])
# Create user and add SSH key
public_key = AliCloudKeyFileManager.GetPublicKey()
user_data = util.ADD_USER_TEMPLATE.format(user_name=self.user_name,
public_key=public_key)
logging.debug('encoding startup script: %s', user_data)
create_cmd.extend(['--UserData', six.ensure_str(
base64.b64encode(user_data.encode('utf-8')))])
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _ = vm_util.IssueRetryableCommand(create_cmd)
response = json.loads(stdout)
self.id = response['InstanceId']
self._AllocatePubIp(self.region, self.id)
start_cmd = util.ALI_PREFIX + [
'ecs',
'StartInstance',
'--InstanceId %s' % self.id]
start_cmd = util.GetEncodedCmd(start_cmd)
vm_util.IssueRetryableCommand(start_cmd)
def _Delete(self):
"""Delete a VM instance."""
stop_cmd = util.ALI_PREFIX + [
'ecs',
'StopInstance',
'--InstanceId %s' % self.id]
stop_cmd = util.GetEncodedCmd(stop_cmd)
vm_util.IssueRetryableCommand(stop_cmd)
self._WaitForInstanceStatus(['Stopped'])
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteInstance',
'--InstanceId %s' % self.id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueRetryableCommand(delete_cmd)
if FLAGS.ali_use_vpc:
self._WaitForEipStatus(['Available'])
release_eip_cmd = util.ALI_PREFIX + [
'ecs',
'ReleaseEipAddress',
'--RegionId %s' % self.region,
'--AllocationId %s' % self.eip_id]
release_eip_cmd = util.GetEncodedCmd(release_eip_cmd)
vm_util.IssueRetryableCommand(release_eip_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % str(self.id)]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instances = response['Instances']['Instance']
assert len(instances) < 2, 'Too many instances.'
if not instances:
return False
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['Status']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
data_disk = ali_disk.AliDisk(disk_spec, self.zone)
self.scratch_disks.append(data_disk)
if disk_spec.disk_type != disk.LOCAL:
data_disk.Create()
data_disk.Attach(self)
data_disk.WaitForDiskStatus(['In_use'])
else:
data_disk.device_letter = DRIVE_START_LETTER
self.FormatDisk(data_disk.GetDevicePath(), disk_spec.disk_type)
self.MountDisk(data_disk.GetDevicePath(), disk_spec.mount_point,
disk_spec.disk_type, data_disk.mount_options,
data_disk.fstab_options)
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, RESOURCE_TYPE[INSTANCE], self.region, **kwargs)
class AliCloudKeyFileManager(object):
"""Object for managing AliCloud Keyfiles."""
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
run_uri_key_names = {}
@classmethod
def ImportKeyfile(cls, region):
"""Imports the public keyfile to AliCloud."""
with cls._lock:
if FLAGS.run_uri in cls.run_uri_key_names:
return cls.run_uri_key_names[FLAGS.run_uri]
public_key = cls.GetPublicKey()
key_name = cls.GetKeyNameForRun()
import_cmd = util.ALI_PREFIX + [
'ecs',
'ImportKeyPair',
'--RegionId', region,
'--KeyPairName', key_name,
'--PublicKeyBody', json.dumps(public_key)]
vm_util.IssueRetryableCommand(import_cmd)
cls.run_uri_key_names[FLAGS.run_uri] = key_name
return key_name
@classmethod
def DeleteKeyfile(cls, region, key_name):
"""Deletes the imported KeyPair for a run_uri."""
with cls._lock:
if FLAGS.run_uri not in cls.run_uri_key_names:
return
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteKeyPairs',
'--RegionId', region,
'--KeyPairNames', json.dumps([key_name])]
vm_util.IssueRetryableCommand(delete_cmd)
del cls.run_uri_key_names[FLAGS.run_uri]
@classmethod
def GetKeyNameForRun(cls):
return 'perfkit_key_{0}'.format(FLAGS.run_uri)
@classmethod
def GetPublicKey(cls):
cat_cmd = ['cat',
vm_util.GetPublicKeyPath()]
keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd)
return keyfile.strip()
class Ubuntu1604BasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.Ubuntu1604Mixin):
IMAGE_NAME_FILTER = 'ubuntu_16_04_64*alibase*.vhd'
PYTHON_PIP_PACKAGE_VERSION = '9.0.3'
class Ubuntu1804BasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.Ubuntu1804Mixin):
IMAGE_NAME_FILTER = 'ubuntu_18_04_64*alibase*.vhd'
class CentOs7BasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.CentOs7Mixin):
IMAGE_NAME_FILTER = 'centos_7_05_64*alibase*.vhd'
|
from datetime import date, datetime, time, timedelta
from pygal import DateLine, DateTimeLine, TimeDeltaLine, TimeLine
from pygal._compat import timestamp, utc
from pygal.test.utils import texts
def test_date():
"""Test a simple dateline"""
date_chart = DateLine(truncate_label=1000)
date_chart.add(
'dates', [(date(2013, 1, 2), 300), (date(2013, 1, 12), 412),
(date(2013, 2, 2), 823), (date(2013, 2, 22), 672)]
)
q = date_chart.render_pyquery()
dates = list(map(lambda t: t.split(' ')[0], q(".axis.x text").map(texts)))
assert dates == ['2013-01-12', '2013-01-24', '2013-02-04', '2013-02-16']
def test_time():
"""Test a simple timeline"""
time_chart = TimeLine(truncate_label=1000)
time_chart.add(
'times', [(time(1, 12, 29), 2), (time(21, 2, 29), 10),
(time(12, 30, 59), 7)]
)
q = time_chart.render_pyquery()
dates = list(map(lambda t: t.split(' ')[0], q(".axis.x text").map(texts)))
assert dates == [
'02:46:40', '05:33:20', '08:20:00', '11:06:40', '13:53:20', '16:40:00',
'19:26:40'
]
def test_datetime():
"""Test a simple datetimeline"""
datetime_chart = DateTimeLine(truncate_label=1000)
datetime_chart.add(
'datetimes',
[(datetime(2013, 1, 2, 1, 12, 29), 300),
(datetime(2013, 1, 12, 21, 2, 29), 412),
(datetime(2013, 2, 2, 12, 30, 59), 823), (datetime(2013, 2, 22), 672)]
)
q = datetime_chart.render_pyquery()
dates = list(map(lambda t: t.split(' ')[0], q(".axis.x text").map(texts)))
assert dates == [
'2013-01-12T14:13:20', '2013-01-24T04:00:00', '2013-02-04T17:46:40',
'2013-02-16T07:33:20'
]
def test_timedelta():
"""Test a simple timedeltaline"""
timedelta_chart = TimeDeltaLine(truncate_label=1000)
timedelta_chart.add(
'timedeltas', [
(timedelta(seconds=1), 10),
(timedelta(weeks=1), 50),
(timedelta(hours=3, seconds=30), 3),
(timedelta(microseconds=12112), .3),
]
)
q = timedelta_chart.render_pyquery()
assert list(t for t in q(".axis.x text").map(texts) if t != '0:00:00') == [
'1 day, 3:46:40', '2 days, 7:33:20', '3 days, 11:20:00',
'4 days, 15:06:40', '5 days, 18:53:20', '6 days, 22:40:00'
]
def test_date_xrange():
"""Test dateline with xrange"""
datey = DateLine(truncate_label=1000)
datey.add(
'dates', [(date(2013, 1, 2), 300), (date(2013, 1, 12), 412),
(date(2013, 2, 2), 823), (date(2013, 2, 22), 672)]
)
datey.xrange = (date(2013, 1, 1), date(2013, 3, 1))
q = datey.render_pyquery()
dates = list(map(lambda t: t.split(' ')[0], q(".axis.x text").map(texts)))
assert dates == [
'2013-01-01', '2013-01-12', '2013-01-24', '2013-02-04', '2013-02-16',
'2013-02-27'
]
def test_date_labels():
"""Test dateline with xrange"""
datey = DateLine(truncate_label=1000)
datey.add(
'dates', [(date(2013, 1, 2), 300), (date(2013, 1, 12), 412),
(date(2013, 2, 2), 823), (date(2013, 2, 22), 672)]
)
datey.x_labels = [date(2013, 1, 1), date(2013, 2, 1), date(2013, 3, 1)]
q = datey.render_pyquery()
dates = list(map(lambda t: t.split(' ')[0], q(".axis.x text").map(texts)))
assert dates == ['2013-01-01', '2013-02-01', '2013-03-01']
def test_utc_timestamping():
assert timestamp(datetime(2017, 7, 14, 2,
40).replace(tzinfo=utc)) == 1500000000
for d in (datetime.now(), datetime.utcnow(), datetime(
1999, 12, 31, 23, 59, 59), datetime(2000, 1, 1, 0, 0, 0)):
assert datetime.utcfromtimestamp(timestamp(d)
) - d < timedelta(microseconds=10)
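# For reference, timestamp() above follows the usual Unix-epoch convention
# (seconds since 1970-01-01 UTC), which the 1500000000 assertion relies on;
# an illustrative check:
#
#   timestamp(datetime(1970, 1, 1, tzinfo=utc))  # -> 0.0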
|
import io
from locale import getpreferredencoding
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import unittest
from subprocess import run, CalledProcessError
ENCODING = getpreferredencoding() or 'utf-8'
class TestBase(unittest.TestCase):
def setUp(self, prefix_for_tmp_repo):
"""Creates temporary dir and cds to it."""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
self.path = tempfile.mkdtemp(prefix=prefix_for_tmp_repo)
logging.debug('Created temporary directory {0}'.format(self.path))
os.chdir(self.path)
def tearDown(self):
"""Removes the temporary dir."""
rmtree(self.path)
def assertRaisesRegexp(self, exc, r, fun, *args, **kwargs):
try:
fun(*args, **kwargs)
self.fail('Exception not raised')
except exc as e:
msg = e.stderr if isinstance(e, CalledProcessError) else str(e)
if not re.search(r, msg):
self.fail('No "{0}" found in "{1}"'.format(r, msg))
def rmtree(path):
# On Windows, running shutil.rmtree on a folder that contains read-only
  # files throws errors. To work around this, if removing a path fails, we make
# the path writable and then try again
def onerror(func, path, unused_exc_info): # error handler for rmtree
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
# Swallow errors for now (on Windows there seems to be something weird
# going on and we can't remove the temp directory even after all files
# in it have been successfully removed)
pass
shutil.rmtree(path, onerror=onerror)
logging.debug('Removed dir {0}'.format(path))
def symlink(src, dst):
try:
os.symlink(src, dst)
except (AttributeError, NotImplementedError, OSError):
# Swallow the exceptions, because Windows is very weird about creating
    # symlinks. Python 2 on Windows does not have a symlink method in the os module,
# AttributeError will handle that. Python 3 does have a symlink method in
# the os module, however, it has some quirks. NotImplementedError handles
# the case where the Windows version is prior to Vista. OSError handles the
# case where python doesn't have permissions to create a symlink on
# windows. In all cases, it's not necessary to test this, so skip it.
# See: https://docs.python.org/3.5/library/os.html#os.symlink and
# https://docs.python.org/2.7/library/os.html#os.symlink for full details.
pass
def write_file(fp, contents=''):
_x_file('w', fp, contents=contents)
def append_to_file(fp, contents=''):
_x_file('a', fp, contents=contents)
def set_test_config():
git('config', 'user.name', 'test')
git('config', 'user.email', '[email protected]')
def read_file(fp):
with io.open(fp, mode='r', encoding=ENCODING) as f:
ret = f.read()
return ret
def git(*args, cwd=None, _in=None):
p = run(
['git', '--no-pager', *args], capture_output=True, check=True, cwd=cwd,
input=_in, encoding=ENCODING)
return p.stdout
def gl(*args, cwd=None, _in=None):
p = run(
['gl', *args], capture_output=True, check=True, cwd=cwd,
input=_in, encoding=ENCODING)
return p.stdout
# Private functions
def _x_file(x, fp, contents=''):
if not contents:
contents = fp
dirs, _ = os.path.split(fp)
if dirs and not os.path.exists(dirs):
os.makedirs(dirs)
with io.open(fp, mode=x, encoding=ENCODING) as f:
f.write(contents)
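# A hypothetical sketch of a concrete test case built on the helpers above
# (the class and file names are assumptions; assumes git and gl are on PATH):
#
#   class TestCommit(TestBase):
#     def setUp(self):
#       super().setUp('gl-commit-test-')
#       git('init')
#       set_test_config()
#       write_file('README.md', 'hello')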
|
import sys
from urllib.parse import urlencode
import pytest
import pytest_httpbin.certs
import vcr
from assertions import assert_cassette_has_one_response
httplib2 = pytest.importorskip("httplib2")
def http():
"""
Returns an httplib2 HTTP instance
with the certificate replaced by the httpbin one.
"""
kwargs = {"ca_certs": pytest_httpbin.certs.where()}
if sys.version_info[:2] in [(2, 7), (3, 7)]:
kwargs["disable_ssl_certificate_validation"] = True
return httplib2.Http(**kwargs)
def test_response_code(tmpdir, httpbin_both):
"""Ensure we can read a response code from a fetch"""
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join("atts.yaml"))):
resp, _ = http().request(url)
code = resp.status
with vcr.use_cassette(str(tmpdir.join("atts.yaml"))):
resp, _ = http().request(url)
assert code == resp.status
def test_random_body(httpbin_both, tmpdir):
"""Ensure we can read the content, and that it's served from cache"""
url = httpbin_both.url + "/bytes/1024"
with vcr.use_cassette(str(tmpdir.join("body.yaml"))):
_, content = http().request(url)
body = content
with vcr.use_cassette(str(tmpdir.join("body.yaml"))):
_, content = http().request(url)
assert body == content
def test_response_headers(tmpdir, httpbin_both):
"""Ensure we can get information from the response"""
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
headers = resp.items()
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
assert set(headers) == set(resp.items())
def test_effective_url(tmpdir, httpbin_both):
"""Ensure that the effective_url is captured"""
url = httpbin_both.url + "/redirect-to?url=/html"
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
effective_url = resp["content-location"]
    assert effective_url == httpbin_both.url + "/html"
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
assert effective_url == resp["content-location"]
def test_multiple_requests(tmpdir, httpbin_both):
"""Ensure that we can cache multiple requests"""
urls = [httpbin_both.url, httpbin_both.url, httpbin_both.url + "/get", httpbin_both.url + "/bytes/1024"]
with vcr.use_cassette(str(tmpdir.join("multiple.yaml"))) as cass:
[http().request(url) for url in urls]
assert len(cass) == len(urls)
def test_get_data(tmpdir, httpbin_both):
"""Ensure that it works with query data"""
data = urlencode({"some": 1, "data": "here"})
url = httpbin_both.url + "/get?" + data
with vcr.use_cassette(str(tmpdir.join("get_data.yaml"))):
_, res1 = http().request(url)
with vcr.use_cassette(str(tmpdir.join("get_data.yaml"))):
_, res2 = http().request(url)
assert res1 == res2
def test_post_data(tmpdir, httpbin_both):
"""Ensure that it works when posting data"""
data = urlencode({"some": 1, "data": "here"})
url = httpbin_both.url + "/post"
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))):
_, res1 = http().request(url, "POST", data)
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))) as cass:
_, res2 = http().request(url, "POST", data)
assert res1 == res2
assert_cassette_has_one_response(cass)
def test_post_unicode_data(tmpdir, httpbin_both):
"""Ensure that it works when posting unicode data"""
data = urlencode({"snowman": "☃".encode()})
url = httpbin_both.url + "/post"
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))):
_, res1 = http().request(url, "POST", data)
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))) as cass:
_, res2 = http().request(url, "POST", data)
assert res1 == res2
assert_cassette_has_one_response(cass)
def test_cross_scheme(tmpdir, httpbin, httpbin_secure):
"""Ensure that requests between schemes are treated separately"""
    # First fetch a url under https, and then again under http, and then
    # ensure that we haven't served anything out of cache, and we have two
# requests / response pairs in the cassette
with vcr.use_cassette(str(tmpdir.join("cross_scheme.yaml"))) as cass:
http().request(httpbin_secure.url)
http().request(httpbin.url)
assert len(cass) == 2
assert cass.play_count == 0
def test_decorator(tmpdir, httpbin_both):
"""Test the decorator version of VCR.py"""
url = httpbin_both.url
@vcr.use_cassette(str(tmpdir.join("atts.yaml")))
def inner1():
resp, _ = http().request(url)
return resp["status"]
@vcr.use_cassette(str(tmpdir.join("atts.yaml")))
def inner2():
resp, _ = http().request(url)
return resp["status"]
assert inner1() == inner2()
|
from __future__ import division
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainercv.experimental.links.model.fcis.utils.proposal_target_creator \
import ProposalTargetCreator
from chainercv.links.model.faster_rcnn.faster_rcnn_train_chain \
import _fast_rcnn_loc_loss
from chainercv.links.model.faster_rcnn.utils.anchor_target_creator \
import AnchorTargetCreator
class FCISTrainChain(chainer.Chain):
"""Calculate losses for FCIS and report them.
This is used to train FCIS in the joint training scheme [#FCISCVPR]_.
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
* :obj:`roi_mask_loss`: The mask loss for the head module.
.. [#FCISCVPR] Yi Li, Haozhi Qi, Jifeng Dai, Xiangyang Ji, Yichen Wei. \
Fully Convolutional Instance-aware Semantic Segmentation. CVPR 2017.
Args:
fcis (~chainercv.experimental.links.model.fcis.FCIS):
A FCIS model for training.
rpn_sigma (float): Sigma parameter for the localization loss
of Region Proposal Network (RPN). The default value is 3,
which is the value used in [#FCISCVPR]_.
        roi_sigma (float): Sigma parameter for the localization loss of
the head. The default value is 1, which is the value used
in [#FCISCVPR]_.
anchor_target_creator: An instantiation of
:class:`~chainercv.links.model.faster_rcnn.AnchorTargetCreator`.
proposal_target_creator: An instantiation of
:class:`~chainercv.experimental.links.model.fcis.ProposalTargetCreator`.
"""
def __init__(
self, fcis,
rpn_sigma=3.0, roi_sigma=1.0,
anchor_target_creator=AnchorTargetCreator(),
proposal_target_creator=ProposalTargetCreator()
):
super(FCISTrainChain, self).__init__()
with self.init_scope():
self.fcis = fcis
self.rpn_sigma = rpn_sigma
self.roi_sigma = roi_sigma
self.mask_size = self.fcis.head.roi_size
self.loc_normalize_mean = fcis.loc_normalize_mean
self.loc_normalize_std = fcis.loc_normalize_std
self.anchor_target_creator = anchor_target_creator
self.proposal_target_creator = proposal_target_creator
def forward(self, imgs, masks, labels, bboxes, scale):
"""Forward FCIS and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
* :math:`H` is the image height.
* :math:`W` is the image width.
Currently, only :math:`N=1` is supported.
Args:
imgs (~chainer.Variable): A variable with a batch of images.
masks (~chainer.Variable): A batch of masks.
Its shape is :math:`(N, R, H, W)`.
labels (~chainer.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
bboxes (~chainer.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
scale (float or ~chainer.Variable): Amount of scaling applied to
the raw image during preprocessing.
Returns:
chainer.Variable:
Scalar loss variable.
This is the sum of losses for Region Proposal Network and
the head module.
"""
if isinstance(masks, chainer.Variable):
masks = masks.array
if isinstance(labels, chainer.Variable):
labels = labels.array
if isinstance(bboxes, chainer.Variable):
bboxes = bboxes.array
if isinstance(scale, chainer.Variable):
scale = scale.array
scale = scale.item()
n = masks.shape[0]
# batch size = 1
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
assert img_size == masks.shape[2:]
rpn_features, roi_features = self.fcis.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = self.fcis.rpn(
rpn_features, img_size, scale)
# batch size = 1
mask = masks[0]
label = labels[0]
bbox = bboxes[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# Sample RoIs and forward
sample_roi, gt_roi_mask, gt_roi_label, gt_roi_loc = \
self.proposal_target_creator(
roi, mask, label, bbox, self.loc_normalize_mean,
self.loc_normalize_std, self.mask_size)
sample_roi_index = self.xp.zeros(
(len(sample_roi),), dtype=np.int32)
roi_ag_seg_score, roi_ag_loc, roi_cls_score, _, _ = self.fcis.head(
roi_features, sample_roi, sample_roi_index, img_size, gt_roi_label)
# RPN losses
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
bbox, anchor, img_size)
# CPU -> GPU
if cuda.get_array_module(rpn_loc.array) != np:
gt_rpn_loc = cuda.to_gpu(gt_rpn_loc)
gt_rpn_label = cuda.to_gpu(gt_rpn_label)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc, gt_rpn_loc, gt_rpn_label, self.rpn_sigma)
rpn_cls_loss = F.softmax_cross_entropy(rpn_score, gt_rpn_label)
# Losses for outputs of the head
n_roi = roi_ag_loc.shape[0]
gt_roi_fg_label = (gt_roi_label > 0).astype(np.int)
roi_loc = roi_ag_loc[self.xp.arange(n_roi), gt_roi_fg_label]
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc, gt_roi_loc, gt_roi_label, self.roi_sigma)
roi_cls_loss = F.softmax_cross_entropy(roi_cls_score, gt_roi_label)
        # normalize over every (valid and invalid) instance
roi_mask_loss = F.softmax_cross_entropy(
roi_ag_seg_score, gt_roi_mask, normalize=False) \
* 10.0 / self.mask_size / self.mask_size
loss = rpn_loc_loss + rpn_cls_loss \
+ roi_loc_loss + roi_cls_loss + roi_mask_loss
chainer.reporter.report({
'rpn_loc_loss': rpn_loc_loss,
'rpn_cls_loss': rpn_cls_loss,
'roi_loc_loss': roi_loc_loss,
'roi_cls_loss': roi_cls_loss,
'roi_mask_loss': roi_mask_loss,
'loss': loss,
}, self)
return loss
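# A minimal, hypothetical sketch of driving this chain with the standard
# Chainer training loop (names like `fcis` and `train_iter` are assumptions;
# the iterator's converter must yield batches in the
# (imgs, masks, labels, bboxes, scale) order that forward() expects):
#
#   model = FCISTrainChain(fcis)
#   optimizer = chainer.optimizers.MomentumSGD(lr=0.0005, momentum=0.9)
#   optimizer.setup(model)
#   updater = chainer.training.updaters.StandardUpdater(
#       train_iter, optimizer, device=0)
#   trainer = chainer.training.Trainer(updater, (90000, 'iteration'))
#   trainer.run()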
|
from docker_registry.core import driver as driveengine
from docker_registry import testing
# Mock any boto
from docker_registry.testing import mock_boto # noqa
# Mock our s3 - xxx this smells like byte-range support is questionable...
from . import mock_s3 # noqa
def getinit(name):
def init(self):
self.scheme = name
self.path = ''
self.config = testing.Config({})
return init
for name in driveengine.available():
# The globals shenanigan is required so that the test tool finds the tests.
# The dynamic type() call is what builds a distinct Query/Driver test class
# per available storage driver.
globals()['TestQuery%s' % name] = type('TestQuery%s' % name,
(testing.Query,),
dict(__init__=getinit(name)))
globals()['TestDriver%s' % name] = type('TestDriver%s' % name,
(testing.Driver,),
dict(__init__=getinit(name)))
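# For illustration, with a driver named 'file' available, the loop above ends
# up defining classes roughly equivalent to:
#
#   class TestQueryfile(testing.Query):
#       def __init__(self):
#           self.scheme = 'file'
#           self.path = ''
#           self.config = testing.Config({})
#
# and likewise TestDriverfile(testing.Driver), so the runner discovers one
# Query/Driver test class per available storage driver.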
|
try:
import torch
# Check if GPU is available
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
dtype = torch.float
except ImportError: # pragma: no cover
torch = None
device = None
dtype = None
# This module initializes flags for optional dependencies
try:
import pandas
HAS_PANDAS = True
except ImportError: # pragma: no cover
HAS_PANDAS = False
pandas = None
# Set a global variable whether to show progress bar or not.
SHOW_PROGRESS = True
def no_progress():
"""
    If called, sets the global variable `SHOW_PROGRESS` to False, resulting in
    no progress bars anywhere.
"""
global SHOW_PROGRESS
SHOW_PROGRESS = False
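# A sketch of how downstream code might consult these module-level flags
# (the `deps` module name is an assumption for illustration):
#
#   from mypackage import deps
#
#   if deps.HAS_PANDAS:
#       frame = deps.pandas.DataFrame(records)
#   if deps.SHOW_PROGRESS:
#       ...  # draw a progress bar here
#   if deps.torch is not None:
#       tensor = deps.torch.zeros(3, device=deps.device, dtype=deps.dtype)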
|
from markups import ReStructuredTextMarkup
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QGridLayout, QLabel, \
QSpinBox
class InsertTableDialog(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.parent = parent
self.setWindowTitle(self.tr('Insert table'))
buttonBox = QDialogButtonBox(self)
buttonBox.setStandardButtons(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.makeTable)
buttonBox.rejected.connect(self.close)
layout = QGridLayout(self)
rowsLabel = QLabel(self.tr('Number of rows') + ':', self)
columnsLabel = QLabel(self.tr('Number of columns') + ':', self)
self.rowsSpinBox = QSpinBox(self)
self.columnsSpinBox = QSpinBox(self)
self.rowsSpinBox.setRange(1, 10)
self.columnsSpinBox.setRange(1, 10)
self.rowsSpinBox.setValue(3)
self.columnsSpinBox.setValue(3)
layout.addWidget(rowsLabel, 0, 0)
layout.addWidget(self.rowsSpinBox, 0, 1, Qt.AlignRight)
layout.addWidget(columnsLabel, 1, 0)
layout.addWidget(self.columnsSpinBox, 1, 1, Qt.AlignRight)
layout.addWidget(buttonBox, 2, 0, 1, 2)
def makeTable(self):
rowsCount = self.rowsSpinBox.value()
columnsCount = self.columnsSpinBox.value() + 1
tab = self.parent.currentTab
cursor = tab.editBox.textCursor()
tableCode = '' if cursor.atBlockStart() else '\n\n'
if tab.activeMarkupClass == ReStructuredTextMarkup:
# Insert reStructuredText grid table
tableCode += '-----'.join('+' * columnsCount) + '\n'
tableCode += ' '.join('|' * columnsCount) + '\n'
tableCode += '====='.join('+' * columnsCount) + '\n'
tableCode += (' '.join('|' * columnsCount) + '\n' +
'-----'.join('+' * columnsCount) + '\n') * rowsCount
else:
# Insert Markdown table
tableCode += ' '.join('|' * columnsCount) + '\n'
tableCode += '-----'.join('|' * columnsCount) + '\n'
tableCode += (' '.join('|' * columnsCount) + '\n') * rowsCount
cursor.insertText(tableCode)
self.close()
# Activate the Table editing mode
self.parent.actionTableMode.setChecked(True)
tab.editBox.tableModeEnabled = True
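
# Editorial reference note (not part of the original file): with the default
# spin box values (3 rows, 3 columns, so columnsCount == 4 including the
# closing edge) the reStructuredText branch of makeTable emits a grid table of
# the form:
#
#   +-----+-----+-----+
#   |     |     |     |
#   +=====+=====+=====+
#   |     |     |     |
#   +-----+-----+-----+
#   (the last two lines repeat once per requested row)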
|
import pytest
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.components.rfxtrx.const import ATTR_EVENT
from homeassistant.core import State
from tests.common import MockConfigEntry, mock_restore_cache
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
EVENT_SMOKE_DETECTOR_PANIC = "08200300a109000670"
EVENT_SMOKE_DETECTOR_NO_PANIC = "08200300a109000770"
EVENT_MOTION_DETECTOR_MOTION = "08200100a109000470"
EVENT_MOTION_DETECTOR_NO_MOTION = "08200100a109000570"
EVENT_LIGHT_DETECTOR_LIGHT = "08200100a109001570"
EVENT_LIGHT_DETECTOR_DARK = "08200100a109001470"
EVENT_AC_118CDEA_2_ON = "0b1100100118cdea02010f70"
async def test_one(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(devices={"0b1100cd0213c7f230010f71": {}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.ac_213c7f2_48")
assert state
assert state.state == "off"
assert state.attributes.get("friendly_name") == "AC 213c7f2:48"
async def test_one_pt2262(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(
devices={
"0913000022670e013970": {
"data_bits": 4,
"command_on": 0xE,
"command_off": 0x7,
}
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("binary_sensor.pt2262_22670e")
assert state
    assert state.state == "off"  # probably ought to be unknown
assert state.attributes.get("friendly_name") == "PT2262 22670e"
await rfxtrx.signal("0913000022670e013970")
state = hass.states.get("binary_sensor.pt2262_22670e")
assert state.state == "on"
await rfxtrx.signal("09130000226707013d70")
state = hass.states.get("binary_sensor.pt2262_22670e")
assert state.state == "off"
async def test_pt2262_unconfigured(hass, rfxtrx):
"""Test with discovery for PT2262."""
entry_data = create_rfx_test_cfg(
devices={"0913000022670e013970": {}, "09130000226707013970": {}}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("binary_sensor.pt2262_22670e")
assert state
    assert state.state == "off"  # probably ought to be unknown
assert state.attributes.get("friendly_name") == "PT2262 22670e"
state = hass.states.get("binary_sensor.pt2262_226707")
assert state
    assert state.state == "off"  # probably ought to be unknown
assert state.attributes.get("friendly_name") == "PT2262 226707"
@pytest.mark.parametrize(
"state,event",
[["on", "0b1100cd0213c7f230010f71"], ["off", "0b1100cd0213c7f230000f71"]],
)
async def test_state_restore(hass, rfxtrx, state, event):
"""State restoration."""
entity_id = "binary_sensor.ac_213c7f2_48"
mock_restore_cache(hass, [State(entity_id, state, attributes={ATTR_EVENT: event})])
entry_data = create_rfx_test_cfg(devices={"0b1100cd0213c7f230010f71": {}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == state
async def test_several(hass, rfxtrx):
"""Test with 3."""
entry_data = create_rfx_test_cfg(
devices={
"0b1100cd0213c7f230010f71": {},
"0b1100100118cdea02010f70": {},
"0b1100101118cdea02010f70": {},
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.ac_213c7f2_48")
assert state
assert state.state == "off"
assert state.attributes.get("friendly_name") == "AC 213c7f2:48"
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "off"
assert state.attributes.get("friendly_name") == "AC 118cdea:2"
state = hass.states.get("binary_sensor.ac_1118cdea_2")
assert state
assert state.state == "off"
assert state.attributes.get("friendly_name") == "AC 1118cdea:2"
async def test_discover(hass, rfxtrx_automatic):
"""Test with discovery."""
rfxtrx = rfxtrx_automatic
await rfxtrx.signal("0b1100100118cdea02010f70")
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "on"
await rfxtrx.signal("0b1100100118cdeb02010f70")
state = hass.states.get("binary_sensor.ac_118cdeb_2")
assert state
assert state.state == "on"
async def test_off_delay_restore(hass, rfxtrx):
"""Make sure binary sensor restore as off, if off delay is active."""
mock_restore_cache(
hass,
[
State(
"binary_sensor.ac_118cdea_2",
"on",
attributes={ATTR_EVENT: EVENT_AC_118CDEA_2_ON},
)
],
)
entry_data = create_rfx_test_cfg(devices={EVENT_AC_118CDEA_2_ON: {"off_delay": 5}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "off"
async def test_off_delay(hass, rfxtrx, timestep):
"""Test with discovery."""
entry_data = create_rfx_test_cfg(
devices={"0b1100100118cdea02010f70": {"off_delay": 5}}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "off"
await rfxtrx.signal("0b1100100118cdea02010f70")
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "on"
await timestep(4)
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "on"
await timestep(4)
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "off"
await rfxtrx.signal("0b1100100118cdea02010f70")
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "on"
await timestep(3)
await rfxtrx.signal("0b1100100118cdea02010f70")
await timestep(4)
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "on"
await timestep(4)
state = hass.states.get("binary_sensor.ac_118cdea_2")
assert state
assert state.state == "off"
async def test_panic(hass, rfxtrx_automatic):
"""Test panic entities."""
rfxtrx = rfxtrx_automatic
entity_id = "binary_sensor.kd101_smoke_detector_a10900_32"
await rfxtrx.signal(EVENT_SMOKE_DETECTOR_PANIC)
assert hass.states.get(entity_id).state == "on"
assert hass.states.get(entity_id).attributes.get("device_class") == "smoke"
await rfxtrx.signal(EVENT_SMOKE_DETECTOR_NO_PANIC)
assert hass.states.get(entity_id).state == "off"
async def test_motion(hass, rfxtrx_automatic):
"""Test motion entities."""
rfxtrx = rfxtrx_automatic
entity_id = "binary_sensor.x10_security_motion_detector_a10900_32"
await rfxtrx.signal(EVENT_MOTION_DETECTOR_MOTION)
assert hass.states.get(entity_id).state == "on"
assert hass.states.get(entity_id).attributes.get("device_class") == "motion"
await rfxtrx.signal(EVENT_MOTION_DETECTOR_NO_MOTION)
assert hass.states.get(entity_id).state == "off"
async def test_light(hass, rfxtrx_automatic):
"""Test light entities."""
rfxtrx = rfxtrx_automatic
entity_id = "binary_sensor.x10_security_motion_detector_a10900_32"
await rfxtrx.signal(EVENT_LIGHT_DETECTOR_LIGHT)
assert hass.states.get(entity_id).state == "on"
await rfxtrx.signal(EVENT_LIGHT_DETECTOR_DARK)
assert hass.states.get(entity_id).state == "off"
async def test_pt2262_duplicate_id(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(
devices={
"0913000022670e013970": {
"data_bits": 4,
"command_on": 0xE,
"command_off": 0x7,
},
"09130000226707013970": {
"data_bits": 4,
"command_on": 0xE,
"command_off": 0x7,
},
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("binary_sensor.pt2262_22670e")
assert state
    assert state.state == "off"  # probably ought to be unknown
assert state.attributes.get("friendly_name") == "PT2262 22670e"
|
import pytest
import sys
from unittest.mock import Mock, patch
from kombu.message import Message
class test_Message:
def test_repr(self):
assert repr(Message('b', channel=Mock()))
def test_decode(self):
m = Message('body', channel=Mock())
decode = m._decode = Mock()
assert m._decoded_cache is None
assert m.decode() is m._decode.return_value
assert m._decoded_cache is m._decode.return_value
m._decode.assert_called_with()
m._decode = Mock()
assert m.decode() is decode.return_value
def test_reraise_error(self):
m = Message('body', channel=Mock())
callback = Mock(name='callback')
try:
raise KeyError('foo')
except KeyError:
m.errors.append(sys.exc_info())
m._reraise_error(callback)
callback.assert_called()
with pytest.raises(KeyError):
m._reraise_error(None)
@patch('kombu.message.decompress')
def test_decompression_stores_error(self, decompress):
decompress.side_effect = RuntimeError()
m = Message('body', channel=Mock(), headers={'compression': 'zlib'})
with pytest.raises(RuntimeError):
m._reraise_error(None)
|
from datetime import datetime
import pytest
from homeassistant.components.season.sensor import (
STATE_AUTUMN,
STATE_SPRING,
STATE_SUMMER,
STATE_WINTER,
TYPE_ASTRONOMICAL,
TYPE_METEOROLOGICAL,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
HEMISPHERE_NORTHERN = {
"homeassistant": {"latitude": "48.864716", "longitude": "2.349014"},
"sensor": {"platform": "season", "type": "astronomical"},
}
HEMISPHERE_SOUTHERN = {
"homeassistant": {"latitude": "-33.918861", "longitude": "18.423300"},
"sensor": {"platform": "season", "type": "astronomical"},
}
HEMISPHERE_EQUATOR = {
"homeassistant": {"latitude": "0", "longitude": "-51.065100"},
"sensor": {"platform": "season", "type": "astronomical"},
}
HEMISPHERE_EMPTY = {
"homeassistant": {},
"sensor": {"platform": "season", "type": "meteorological"},
}
NORTHERN_PARAMETERS = [
(TYPE_ASTRONOMICAL, datetime(2017, 9, 3, 0, 0), STATE_SUMMER),
(TYPE_METEOROLOGICAL, datetime(2017, 8, 13, 0, 0), STATE_SUMMER),
(TYPE_ASTRONOMICAL, datetime(2017, 9, 23, 0, 0), STATE_AUTUMN),
(TYPE_METEOROLOGICAL, datetime(2017, 9, 3, 0, 0), STATE_AUTUMN),
(TYPE_ASTRONOMICAL, datetime(2017, 12, 25, 0, 0), STATE_WINTER),
(TYPE_METEOROLOGICAL, datetime(2017, 12, 3, 0, 0), STATE_WINTER),
(TYPE_ASTRONOMICAL, datetime(2017, 4, 1, 0, 0), STATE_SPRING),
(TYPE_METEOROLOGICAL, datetime(2017, 3, 3, 0, 0), STATE_SPRING),
]
SOUTHERN_PARAMETERS = [
(TYPE_ASTRONOMICAL, datetime(2017, 12, 25, 0, 0), STATE_SUMMER),
(TYPE_METEOROLOGICAL, datetime(2017, 12, 3, 0, 0), STATE_SUMMER),
(TYPE_ASTRONOMICAL, datetime(2017, 4, 1, 0, 0), STATE_AUTUMN),
(TYPE_METEOROLOGICAL, datetime(2017, 3, 3, 0, 0), STATE_AUTUMN),
(TYPE_ASTRONOMICAL, datetime(2017, 9, 3, 0, 0), STATE_WINTER),
(TYPE_METEOROLOGICAL, datetime(2017, 8, 13, 0, 0), STATE_WINTER),
(TYPE_ASTRONOMICAL, datetime(2017, 9, 23, 0, 0), STATE_SPRING),
(TYPE_METEOROLOGICAL, datetime(2017, 9, 3, 0, 0), STATE_SPRING),
]
def idfn(val):
"""Provide IDs for pytest parametrize."""
    if isinstance(val, datetime):
return val.strftime("%Y%m%d")
@pytest.mark.parametrize("type,day,expected", NORTHERN_PARAMETERS, ids=idfn)
async def test_season_northern_hemisphere(hass, type, day, expected):
"""Test that season should be summer."""
hass.config.latitude = HEMISPHERE_NORTHERN["homeassistant"]["latitude"]
config = {
**HEMISPHERE_NORTHERN,
"sensor": {"platform": "season", "type": type},
}
with patch("homeassistant.components.season.sensor.utcnow", return_value=day):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.season")
assert state
assert state.state == expected
@pytest.mark.parametrize("type,day,expected", SOUTHERN_PARAMETERS, ids=idfn)
async def test_season_southern_hemisphere(hass, type, day, expected):
"""Test that season should be summer."""
hass.config.latitude = HEMISPHERE_SOUTHERN["homeassistant"]["latitude"]
config = {
**HEMISPHERE_SOUTHERN,
"sensor": {"platform": "season", "type": type},
}
with patch("homeassistant.components.season.sensor.utcnow", return_value=day):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.season")
assert state
assert state.state == expected
async def test_season_equator(hass):
"""Test that season should be unknown for equator."""
hass.config.latitude = HEMISPHERE_EQUATOR["homeassistant"]["latitude"]
day = datetime(2017, 9, 3, 0, 0)
with patch("homeassistant.components.season.sensor.utcnow", return_value=day):
assert await async_setup_component(hass, "sensor", HEMISPHERE_EQUATOR)
await hass.async_block_till_done()
state = hass.states.get("sensor.season")
assert state
assert state.state == STATE_UNKNOWN
async def test_setup_hemisphere_empty(hass):
"""Test platform setup of missing latlong."""
hass.config.latitude = None
assert await async_setup_component(hass, "sensor", HEMISPHERE_EMPTY)
await hass.async_block_till_done()
assert hass.config.as_dict()["latitude"] is None
|
from typing import Optional, Any, Sequence, Tuple, Callable, List, Type
from typing import Union
from tensornetwork.backends import abstract_backend
from tensornetwork.backends.pytorch import decompositions
import numpy as np
# This might seem bad, but pytype treats torch.Tensor as Any anyway, so
# we don't actually lose anything by doing this.
Tensor = Any
# pylint: disable=abstract-method
class PyTorchBackend(abstract_backend.AbstractBackend):
"""See base_backend.BaseBackend for documentation."""
def __init__(self) -> None:
super().__init__()
# pylint: disable=global-variable-undefined
global torchlib
try:
# pylint: disable=import-outside-toplevel
import torch
except ImportError as err:
raise ImportError("PyTorch not installed, please switch to a different "
"backend or install PyTorch.") from err
torchlib = torch
self.name = "pytorch"
def tensordot(self, a: Tensor, b: Tensor,
axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
return torchlib.tensordot(a, b, dims=axes)
def reshape(self, tensor: Tensor, shape: Tensor) -> Tensor:
return torchlib.reshape(tensor, tuple(np.array(shape).astype(int)))
def transpose(self, tensor, perm=None) -> Tensor:
if perm is None:
perm = tuple(range(tensor.ndim - 1, -1, -1))
return tensor.permute(perm)
def slice(self, tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
if len(start_indices) != len(slice_sizes):
raise ValueError("Lengths of start_indices and slice_sizes must be"
"identical.")
obj = tuple(
slice(start, start + size)
for start, size in zip(start_indices, slice_sizes))
return tensor[obj]
def svd(
self,
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return decompositions.svd(
torchlib,
tensor,
pivot_axis,
max_singular_values,
max_truncation_error,
relative=relative)
def qr(
self,
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.qr(torchlib, tensor, pivot_axis, non_negative_diagonal)
def rq(
self,
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.rq(torchlib, tensor, pivot_axis, non_negative_diagonal)
def shape_concat(self, values: Tensor, axis: int) -> Tensor:
return np.concatenate(values, axis)
def shape_tensor(self, tensor: Tensor) -> Tensor:
return torchlib.tensor(list(tensor.shape))
def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return tuple(tensor.shape)
def sparse_shape(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return self.shape_tuple(tensor)
def shape_prod(self, values: Tensor) -> int:
return np.prod(np.array(values))
def sqrt(self, tensor: Tensor) -> Tensor:
return torchlib.sqrt(tensor)
def convert_to_tensor(self, tensor: Tensor) -> Tensor:
result = torchlib.as_tensor(tensor)
return result
def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return torchlib.tensordot(tensor1, tensor2, dims=0)
# pylint: disable=unused-argument
def einsum(self,
expression: str,
*tensors: Tensor,
optimize: bool = True) -> Tensor:
return torchlib.einsum(expression, *tensors)
def norm(self, tensor: Tensor) -> Tensor:
return torchlib.norm(tensor)
def eye(self,
N: int,
dtype: Optional[Any] = None,
M: Optional[int] = None) -> Tensor:
dtype = dtype if dtype is not None else torchlib.float64
if not M:
M = N #torch crashes if one passes M = None with dtype!=None
return torchlib.eye(n=N, m=M, dtype=dtype)
def ones(self, shape: Tuple[int, ...], dtype: Optional[Any] = None) -> Tensor:
dtype = dtype if dtype is not None else torchlib.float64
return torchlib.ones(shape, dtype=dtype)
def zeros(self,
shape: Tuple[int, ...],
dtype: Optional[Any] = None) -> Tensor:
dtype = dtype if dtype is not None else torchlib.float64
return torchlib.zeros(shape, dtype=dtype)
def randn(self,
shape: Tuple[int, ...],
dtype: Optional[Any] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
torchlib.manual_seed(seed)
dtype = dtype if dtype is not None else torchlib.float64
return torchlib.randn(shape, dtype=dtype)
def random_uniform(self,
shape: Tuple[int, ...],
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
dtype: Optional[Any] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
torchlib.manual_seed(seed)
dtype = dtype if dtype is not None else torchlib.float64
return torchlib.empty(shape, dtype=dtype).uniform_(*boundaries)
def conj(self, tensor: Tensor) -> Tensor:
return tensor #pytorch does not support complex dtypes
def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:
return matrix.symeig(eigenvectors=True)
def eigsh_lanczos(self,
A: Callable,
args: Optional[List[Tensor]] = None,
initial_state: Optional[Tensor] = None,
shape: Optional[Tuple] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
delta: float = 1E-8,
ndiag: int = 20,
reorthogonalize: bool = False) -> Tuple[Tensor, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of a `LinearOperator` `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
      args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
initial_state: An initial vector for the Lanczos algorithm. If `None`,
a random initial `Tensor` is created using the `torch.randn` method
shape: The shape of the input-dimension of `A`.
      dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
      numeig: The number of eigenvector-eigenvalue pairs to be computed.
If `numeig > 1`, `reorthogonalize` has to be `True`.
      tol: The desired precision of the eigenvalues. Uses
`torch.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
If a Krylov vector :math: `x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""
if args is None:
args = []
#TODO: make this work for tensorflow in graph mode
if num_krylov_vecs < numeig:
raise ValueError('`num_krylov_vecs` >= `numeig` required!')
if numeig > 1 and not reorthogonalize:
raise ValueError(
"Got numeig = {} > 1 and `reorthogonalize = False`. "
"Use `reorthogonalize=True` for `numeig > 1`".format(numeig))
if initial_state is None:
if (shape is None) or (dtype is None):
raise ValueError("if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided")
initial_state = self.randn(shape, dtype)
if not isinstance(initial_state, torchlib.Tensor):
raise TypeError("Expected a `torch.Tensor`. Got {}".format(
type(initial_state)))
initial_state = self.convert_to_tensor(initial_state)
vector_n = initial_state
Z = self.norm(vector_n)
vector_n /= Z
norms_vector_n = []
diag_elements = []
krylov_vecs = []
first = True
eigvalsold = []
for it in range(num_krylov_vecs):
#normalize the current vector:
norm_vector_n = torchlib.norm(vector_n)
if abs(norm_vector_n) < delta:
break
norms_vector_n.append(norm_vector_n)
vector_n = vector_n / norms_vector_n[-1]
#store the Lanczos vector for later
if reorthogonalize:
for v in krylov_vecs:
vector_n -= (v.contiguous().view(-1).dot(
vector_n.contiguous().view(-1))) * torchlib.reshape(
v, vector_n.shape)
krylov_vecs.append(vector_n)
A_vector_n = A(vector_n, *args)
diag_elements.append(vector_n.contiguous().view(-1).dot(
A_vector_n.contiguous().view(-1)))
if ((it > 0) and (it % ndiag) == 0) and (len(diag_elements) >= numeig):
#diagonalize the effective Hamiltonian
A_tridiag = torchlib.diag(
torchlib.tensor(diag_elements)) + torchlib.diag(
torchlib.tensor(norms_vector_n[1:]), 1) + torchlib.diag(
torchlib.tensor(norms_vector_n[1:]), -1)
eigvals, u = A_tridiag.symeig(eigenvectors=True)
if not first:
if torchlib.norm(eigvals[0:numeig] - eigvalsold[0:numeig]) < tol:
break
first = False
eigvalsold = eigvals[0:numeig]
if it > 0:
A_vector_n -= (krylov_vecs[-1] * diag_elements[-1])
A_vector_n -= (krylov_vecs[-2] * norms_vector_n[-1])
else:
A_vector_n -= (krylov_vecs[-1] * diag_elements[-1])
vector_n = A_vector_n
A_tridiag = torchlib.diag(torchlib.tensor(diag_elements)) + torchlib.diag(
torchlib.tensor(norms_vector_n[1:]), 1) + torchlib.diag(
torchlib.tensor(norms_vector_n[1:]), -1)
eigvals, u = A_tridiag.symeig(eigenvectors=True)
eigenvectors = []
for n2 in range(min(numeig, len(eigvals))):
state = self.zeros(initial_state.shape, initial_state.dtype)
for n1, vec in enumerate(krylov_vecs):
state += vec * u[n1, n2]
eigenvectors.append(state / torchlib.norm(state))
return eigvals[0:numeig], eigenvectors
def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 + tensor2
def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 - tensor2
def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 * tensor2
def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 / tensor2
def index_update(self, tensor: Tensor, mask: Tensor,
assignee: Tensor) -> Tensor:
#make a copy
t = torchlib.as_tensor(tensor).clone()
t[mask] = assignee
return t
def inv(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) > 2:
raise ValueError(
"input to pytorch backend method `inv` has shape {}. Only matrices are supported."
.format(matrix.shape))
return matrix.inverse()
def broadcast_right_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor2.shape) != 1:
raise ValueError(
"only order-1 tensors are allowed for `tensor2`, found `tensor2.shape = {}`"
.format(tensor2.shape))
return tensor1 * tensor2
def broadcast_left_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor1.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor1`,"
" found `tensor1.shape = {}`".format(tensor1.shape))
t1_broadcast_shape = self.shape_concat(
[self.shape_tensor(tensor1), [1] * (len(tensor2.shape) - 1)], axis=-1)
return tensor2 * self.reshape(tensor1, t1_broadcast_shape)
def jit(self, fun: Callable, *args: List, **kwargs: dict) -> Callable:
return fun
def sum(self,
tensor: Tensor,
axis: Optional[Sequence[int]] = None,
keepdims: bool = False) -> Tensor:
return torchlib.sum(tensor, axis=axis, keepdim=keepdims)
def matmul(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
if (tensor1.ndim <= 1) or (tensor2.ndim <= 1):
raise ValueError("inputs to `matmul` have to be a tensors of order > 1,")
return torchlib.einsum('...ab,...bc->...ac', tensor1, tensor2)
def diagonal(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return specified diagonals.
If tensor is 2-D, returns the diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by removing
axis1 and axis2 and appending an index to the right equal to the size of the
resulting diagonals.
This function only extracts diagonals. If you
wish to create diagonal matrices from vectors, use diagflat.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to second-last and last axis (note this
differs from the NumPy defaults).
Returns:
array_of_diagonals: A dim = min(1, tensor.ndim - 2) tensor storing
the batched diagonals.
"""
if axis1 == axis2:
raise ValueError("axis1={axis1} and axis2={axis2} must be different.")
return torchlib.diagonal(tensor, offset=offset, dim1=axis1, dim2=axis2)
def diagflat(self, tensor: Tensor, k: int = 0) -> Tensor:
""" Flattens tensor and creates a new matrix of zeros with its elements
on the k'th diagonal.
Args:
tensor: A tensor.
k : The diagonal upon which to place its elements.
Returns:
tensor: A new tensor with all zeros save the specified diagonal.
"""
return torchlib.diag_embed(tensor, offset=k)
def trace(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return summed entries along diagonals.
If tensor is 2-D, the sum is over the
diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
summed.
In the PyTorch backend the trace is always over the main diagonal of the
last two entries.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
This argument is not supported by the PyTorch
backend and an error will be raised if they are
specified.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to first/second axis.
These arguments are not supported by the PyTorch
backend and an error will be raised if they are
specified.
Returns:
array_of_diagonals: The batched summed diagonals.
"""
if offset != 0:
errstr = (f"offset = {offset} must be 0 (the default)"
f"with PyTorch backend.")
raise NotImplementedError(errstr)
if axis1 == axis2:
raise ValueError(f"axis1 = {axis1} cannot equal axis2 = {axis2}")
N = len(tensor.shape)
if N > 25:
raise ValueError(f"Currently only tensors with ndim <= 25 can be traced"
f"in the PyTorch backend (yours was {N})")
if axis1 < 0:
axis1 = N+axis1
if axis2 < 0:
axis2 = N+axis2
inds = list(map(chr, range(98, 98+N)))
indsout = [i for n, i in enumerate(inds) if n not in (axis1, axis2)]
inds[axis1] = 'a'
inds[axis2] = 'a'
    return torchlib.einsum(''.join(inds) + '->' + ''.join(indsout), tensor)
def abs(self, tensor: Tensor) -> Tensor:
"""
Returns the elementwise absolute value of tensor.
Args:
tensor: An input tensor.
Returns:
tensor: Its elementwise absolute value.
"""
return torchlib.abs(tensor)
def sign(self, tensor: Tensor) -> Tensor:
"""
Returns an elementwise tensor with entries
y[i] = 1, 0, -1 where tensor[i] > 0, == 0, and < 0 respectively.
For complex input the behaviour of this function may depend on the backend.
The PyTorch version is not implemented in this case.
Args:
tensor: The input tensor.
"""
return torchlib.sign(tensor)
def item(self, tensor):
return tensor.item()
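

# Illustrative usage sketch (editorial addition, assumes PyTorch is installed);
# it only exercises methods defined above and is not part of the library module.
if __name__ == "__main__":
  backend = PyTorchBackend()
  a = backend.randn((3, 4), seed=1)
  b = backend.randn((4, 5), seed=2)
  # Contract the shared index of size 4, giving a 3 x 5 tensor.
  c = backend.tensordot(a, b, axes=[[1], [0]])
  # Treat c as a 3 x 5 matrix (pivot_axis=1) and take its SVD.
  u, s, vh, truncated = backend.svd(c, pivot_axis=1)
  print(backend.shape_tuple(c), backend.shape_tuple(s))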
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
DO_PLOT = False
def test_rts():
fk = KalmanFilter(dim_x=2, dim_z=1)
fk.x = np.array([-1., 1.]) # initial state (location and velocity)
fk.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
fk.H = np.array([[1.,0.]]) # Measurement function
fk.P = .01 # covariance matrix
fk.R = 5 # state uncertainty
fk.Q = 0.001 # process uncertainty
zs = [t + random.randn()*4 for t in range(40)]
mu, cov, _, _ = fk.batch_filter (zs)
mus = [x[0] for x in mu]
M, P, _, _ = fk.rts_smoother(mu, cov)
if DO_PLOT:
p1, = plt.plot(zs,'cyan', alpha=0.5)
p2, = plt.plot(M[:,0],c='b')
p3, = plt.plot(mus,c='r')
p4, = plt.plot([0, len(zs)], [0, len(zs)], 'g') # perfect result
plt.legend([p1, p2, p3, p4],
["measurement", "RKS", "KF output", "ideal"], loc=4)
plt.show()
if __name__ == '__main__':
DO_PLOT = True
test_rts()
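
# Editorial reference note (not part of the original test): rts_smoother runs the
# Rauch-Tung-Striebel backward pass over the forward-filter outputs, i.e. for
# each step k (with predicted covariance P_pred = F P_k F' + Q):
#
#   C_k   = P_k F' inv(P_pred)
#   x_k^s = x_k + C_k (x_{k+1}^s - F x_k)
#   P_k^s = P_k + C_k (P_{k+1}^s - P_pred) C_k'
#
# which is why the smoothed track M[:, 0] plotted above is less noisy than the
# forward-only filter output `mus`.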
|
from binascii import unhexlify
import pytest
import voluptuous as vol
import zigpy.profiles.zha
import zigpy.types
import zigpy.zcl.clusters.general as general
from homeassistant.components.websocket_api import const
from homeassistant.components.zha import DOMAIN
from homeassistant.components.zha.api import (
ATTR_DURATION,
ATTR_INSTALL_CODE,
ATTR_QR_CODE,
ATTR_SOURCE_IEEE,
ID,
SERVICE_PERMIT,
TYPE,
async_load_api,
)
from homeassistant.components.zha.core.const import (
ATTR_CLUSTER_ID,
ATTR_CLUSTER_TYPE,
ATTR_ENDPOINT_ID,
ATTR_ENDPOINT_NAMES,
ATTR_IEEE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_NEIGHBORS,
ATTR_QUIRK_APPLIED,
CLUSTER_TYPE_IN,
DATA_ZHA,
DATA_ZHA_GATEWAY,
GROUP_ID,
GROUP_IDS,
GROUP_NAME,
)
from homeassistant.core import Context
from .conftest import FIXTURE_GRP_ID, FIXTURE_GRP_NAME
from tests.async_mock import AsyncMock, patch
IEEE_SWITCH_DEVICE = "01:2d:6f:00:0a:90:69:e7"
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
@pytest.fixture
async def device_switch(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id, general.Basic.cluster_id],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
},
ieee=IEEE_SWITCH_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_groupable(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
general.OnOff.cluster_id,
general.Basic.cluster_id,
general.Groups.cluster_id,
],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
},
ieee=IEEE_GROUPABLE_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def zha_client(hass, hass_ws_client, device_switch, device_groupable):
"""Test zha switch platform."""
# load the ZHA API
async_load_api(hass)
return await hass_ws_client(hass)
async def test_device_clusters(hass, zha_client):
"""Test getting device cluster info."""
await zha_client.send_json(
{ID: 5, TYPE: "zha/devices/clusters", ATTR_IEEE: IEEE_SWITCH_DEVICE}
)
msg = await zha_client.receive_json()
assert len(msg["result"]) == 2
cluster_infos = sorted(msg["result"], key=lambda k: k[ID])
cluster_info = cluster_infos[0]
assert cluster_info[TYPE] == CLUSTER_TYPE_IN
assert cluster_info[ID] == 0
assert cluster_info[ATTR_NAME] == "Basic"
cluster_info = cluster_infos[1]
assert cluster_info[TYPE] == CLUSTER_TYPE_IN
assert cluster_info[ID] == 6
assert cluster_info[ATTR_NAME] == "OnOff"
async def test_device_cluster_attributes(zha_client):
"""Test getting device cluster attributes."""
await zha_client.send_json(
{
ID: 5,
TYPE: "zha/devices/clusters/attributes",
ATTR_ENDPOINT_ID: 1,
ATTR_IEEE: IEEE_SWITCH_DEVICE,
ATTR_CLUSTER_ID: 6,
ATTR_CLUSTER_TYPE: CLUSTER_TYPE_IN,
}
)
msg = await zha_client.receive_json()
attributes = msg["result"]
assert len(attributes) == 4
for attribute in attributes:
assert attribute[ID] is not None
assert attribute[ATTR_NAME] is not None
async def test_device_cluster_commands(zha_client):
"""Test getting device cluster commands."""
await zha_client.send_json(
{
ID: 5,
TYPE: "zha/devices/clusters/commands",
ATTR_ENDPOINT_ID: 1,
ATTR_IEEE: IEEE_SWITCH_DEVICE,
ATTR_CLUSTER_ID: 6,
ATTR_CLUSTER_TYPE: CLUSTER_TYPE_IN,
}
)
msg = await zha_client.receive_json()
commands = msg["result"]
assert len(commands) == 6
for command in commands:
assert command[ID] is not None
assert command[ATTR_NAME] is not None
assert command[TYPE] is not None
async def test_list_devices(zha_client):
"""Test getting zha devices."""
await zha_client.send_json({ID: 5, TYPE: "zha/devices"})
msg = await zha_client.receive_json()
devices = msg["result"]
assert len(devices) == 2
msg_id = 100
for device in devices:
msg_id += 1
assert device[ATTR_IEEE] is not None
assert device[ATTR_MANUFACTURER] is not None
assert device[ATTR_MODEL] is not None
assert device[ATTR_NAME] is not None
assert device[ATTR_QUIRK_APPLIED] is not None
assert device["entities"] is not None
assert device[ATTR_NEIGHBORS] is not None
assert device[ATTR_ENDPOINT_NAMES] is not None
for entity_reference in device["entities"]:
assert entity_reference[ATTR_NAME] is not None
assert entity_reference["entity_id"] is not None
await zha_client.send_json(
{ID: msg_id, TYPE: "zha/device", ATTR_IEEE: device[ATTR_IEEE]}
)
msg = await zha_client.receive_json()
device2 = msg["result"]
assert device == device2
async def test_device_not_found(zha_client):
"""Test not found response from get device API."""
await zha_client.send_json(
{ID: 6, TYPE: "zha/device", ATTR_IEEE: "28:6d:97:00:01:04:11:8c"}
)
msg = await zha_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_NOT_FOUND
async def test_list_groups(zha_client):
"""Test getting zha zigbee groups."""
await zha_client.send_json({ID: 7, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 1
for group in groups:
assert group["group_id"] == FIXTURE_GRP_ID
assert group["name"] == FIXTURE_GRP_NAME
assert group["members"] == []
async def test_get_group(zha_client):
"""Test getting a specific zha zigbee group."""
await zha_client.send_json({ID: 8, TYPE: "zha/group", GROUP_ID: FIXTURE_GRP_ID})
msg = await zha_client.receive_json()
assert msg["id"] == 8
assert msg["type"] == const.TYPE_RESULT
group = msg["result"]
assert group is not None
assert group["group_id"] == FIXTURE_GRP_ID
assert group["name"] == FIXTURE_GRP_NAME
assert group["members"] == []
async def test_get_group_not_found(zha_client):
"""Test not found response from get group API."""
await zha_client.send_json({ID: 9, TYPE: "zha/group", GROUP_ID: 1_234_567})
msg = await zha_client.receive_json()
assert msg["id"] == 9
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_NOT_FOUND
async def test_list_groupable_devices(zha_client, device_groupable):
"""Test getting zha devices that have a group cluster."""
await zha_client.send_json({ID: 10, TYPE: "zha/devices/groupable"})
msg = await zha_client.receive_json()
assert msg["id"] == 10
assert msg["type"] == const.TYPE_RESULT
device_endpoints = msg["result"]
assert len(device_endpoints) == 1
for endpoint in device_endpoints:
assert endpoint["device"][ATTR_IEEE] == "01:2d:6f:00:0a:90:69:e8"
assert endpoint["device"][ATTR_MANUFACTURER] is not None
assert endpoint["device"][ATTR_MODEL] is not None
assert endpoint["device"][ATTR_NAME] is not None
assert endpoint["device"][ATTR_QUIRK_APPLIED] is not None
assert endpoint["device"]["entities"] is not None
assert endpoint["endpoint_id"] is not None
assert endpoint["entities"] is not None
for entity_reference in endpoint["device"]["entities"]:
assert entity_reference[ATTR_NAME] is not None
assert entity_reference["entity_id"] is not None
for entity_reference in endpoint["entities"]:
assert entity_reference["original_name"] is not None
# Make sure there are no groupable devices when the device is unavailable
# Make device unavailable
device_groupable.available = False
await zha_client.send_json({ID: 11, TYPE: "zha/devices/groupable"})
msg = await zha_client.receive_json()
assert msg["id"] == 11
assert msg["type"] == const.TYPE_RESULT
device_endpoints = msg["result"]
assert len(device_endpoints) == 0
async def test_add_group(zha_client):
"""Test adding and getting a new zha zigbee group."""
await zha_client.send_json({ID: 12, TYPE: "zha/group/add", GROUP_NAME: "new_group"})
msg = await zha_client.receive_json()
assert msg["id"] == 12
assert msg["type"] == const.TYPE_RESULT
added_group = msg["result"]
assert added_group["name"] == "new_group"
assert added_group["members"] == []
await zha_client.send_json({ID: 13, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 13
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 2
for group in groups:
assert group["name"] == FIXTURE_GRP_NAME or group["name"] == "new_group"
async def test_remove_group(zha_client):
"""Test removing a new zha zigbee group."""
await zha_client.send_json({ID: 14, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 1
await zha_client.send_json(
{ID: 15, TYPE: "zha/group/remove", GROUP_IDS: [FIXTURE_GRP_ID]}
)
msg = await zha_client.receive_json()
assert msg["id"] == 15
assert msg["type"] == const.TYPE_RESULT
groups_remaining = msg["result"]
assert len(groups_remaining) == 0
await zha_client.send_json({ID: 16, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 16
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 0
@pytest.fixture
async def app_controller(hass, setup_zha):
"""Fixture for zigpy Application Controller."""
await setup_zha()
controller = hass.data[DATA_ZHA][DATA_ZHA_GATEWAY].application_controller
p1 = patch.object(controller, "permit")
p2 = patch.object(controller, "permit_with_key", new=AsyncMock())
with p1, p2:
yield controller
@pytest.mark.parametrize(
"params, duration, node",
(
({}, 60, None),
({ATTR_DURATION: 30}, 30, None),
(
{ATTR_DURATION: 33, ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:dd"},
33,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:dd"),
),
(
{ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:d1"},
60,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:d1"),
),
),
)
async def test_permit_ha12(
hass, app_controller, hass_admin_user, params, duration, node
):
"""Test permit service."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 1
assert app_controller.permit.await_args[1]["time_s"] == duration
assert app_controller.permit.await_args[1]["node"] == node
assert app_controller.permit_with_key.call_count == 0
IC_TEST_PARAMS = (
(
{
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051",
},
zigpy.types.EUI64.convert(IEEE_SWITCH_DEVICE),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "52797BF4A5084DAA8E1712B61741CA024051",
},
zigpy.types.EUI64.convert(IEEE_SWITCH_DEVICE),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
)
@pytest.mark.parametrize("params, src_ieee, code", IC_TEST_PARAMS)
async def test_permit_with_install_code(
hass, app_controller, hass_admin_user, params, src_ieee, code
):
"""Test permit service with install code."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
IC_FAIL_PARAMS = (
{
# wrong install code
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4052",
},
# incorrect service params
{ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051"},
{ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE},
{
# incorrect service params
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051",
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051",
},
{
# incorrect service params
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051",
},
{
# good regex match, but bad code
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024052"
},
{
# good aqara regex match, but bad code
ATTR_QR_CODE: (
"G$M:751$S:357S00001579$D:000000000F350FFD%Z$A:04CF8CDF"
"3C3C3C3C$I:52797BF4A5084DAA8E1712B61741CA024052"
)
},
# good consciot regex match, but bad code
{ATTR_QR_CODE: "000D6FFFFED4163B|52797BF4A5084DAA8E1712B61741CA024052"},
)
@pytest.mark.parametrize("params", IC_FAIL_PARAMS)
async def test_permit_with_install_code_fail(
hass, app_controller, hass_admin_user, params
):
"""Test permit service with install code."""
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 0
IC_QR_CODE_TEST_PARAMS = (
(
{ATTR_QR_CODE: "000D6FFFFED4163B|52797BF4A5084DAA8E1712B61741CA024051"},
zigpy.types.EUI64.convert("00:0D:6F:FF:FE:D4:16:3B"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051"},
zigpy.types.EUI64.convert("00:0D:6F:FF:FE:D4:16:3B"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{
ATTR_QR_CODE: (
"G$M:751$S:357S00001579$D:000000000F350FFD%Z$A:04CF8CDF"
"3C3C3C3C$I:52797BF4A5084DAA8E1712B61741CA024051"
)
},
zigpy.types.EUI64.convert("04:CF:8C:DF:3C:3C:3C:3C"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
)
@pytest.mark.parametrize("params, src_ieee, code", IC_QR_CODE_TEST_PARAMS)
async def test_permit_with_qr_code(
hass, app_controller, hass_admin_user, params, src_ieee, code
):
"""Test permit service with install code from qr code."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
@pytest.mark.parametrize("params, src_ieee, code", IC_QR_CODE_TEST_PARAMS)
async def test_ws_permit_with_qr_code(
app_controller, zha_client, params, src_ieee, code
):
"""Test permit service with install code from qr code."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
@pytest.mark.parametrize("params", IC_FAIL_PARAMS)
async def test_ws_permit_with_install_code_fail(app_controller, zha_client, params):
"""Test permit ws service with install code."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"] is False
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 0
@pytest.mark.parametrize(
"params, duration, node",
(
({}, 60, None),
({ATTR_DURATION: 30}, 30, None),
(
{ATTR_DURATION: 33, ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:dd"},
33,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:dd"),
),
(
{ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:d1"},
60,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:d1"),
),
),
)
async def test_ws_permit_ha12(app_controller, zha_client, params, duration, node):
"""Test permit ws service."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert app_controller.permit.await_count == 1
assert app_controller.permit.await_args[1]["time_s"] == duration
assert app_controller.permit.await_args[1]["node"] == node
assert app_controller.permit_with_key.call_count == 0
|
import os
import subprocess
from subprocess import PIPE
def cmd(args, capture_output=True):
if capture_output:
ret = subprocess.run(args.split(" "), stdout=PIPE, stderr=PIPE)
ret.stdout = ret.stdout.decode("utf-8")
ret.stderr = ret.stderr.decode("utf-8")
return ret
else:
return subprocess.run(args.split(" "))
def start_paasta_api():
print("Starting Paasta API Server")
p = subprocess.Popen(
"python -m paasta_tools.api.api -D -c {} {}".format(
os.environ["KIND_CLUSTER"], os.environ["PAASTA_API_PORT"]
).split(" ")
)
return p
def paasta_apply():
print("Applying SOA configurations")
service_instances = cmd("python -m paasta_tools.list_kubernetes_service_instances")
cmd(
"python -m paasta_tools.setup_kubernetes_job {} -v".format(
service_instances.stdout.strip()
),
False,
)
def init_all():
paasta_apply()
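

# Hypothetical entrypoint sketch (editorial addition; the original script may be
# driven differently). It assumes the KIND_CLUSTER and PAASTA_API_PORT
# environment variables used above are set: start the API server, apply the SOA
# configurations, then clean up.
if __name__ == "__main__":
    api_proc = start_paasta_api()
    try:
        init_all()
    finally:
        api_proc.terminate()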
|
from homeassistant.components.ruckus_unleashed import DOMAIN
from homeassistant.components.ruckus_unleashed.const import (
API_ACCESS_POINT,
API_AP,
API_DEVICE_NAME,
API_ID,
API_IP,
API_MAC,
API_MODEL,
API_NAME,
API_SERIAL,
API_SYSTEM_OVERVIEW,
API_VERSION,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
DEFAULT_TITLE = "Ruckus Mesh"
DEFAULT_UNIQUE_ID = "123456789012"
DEFAULT_SYSTEM_INFO = {
API_SYSTEM_OVERVIEW: {
API_SERIAL: DEFAULT_UNIQUE_ID,
API_VERSION: "v1.0.0",
}
}
DEFAULT_AP_INFO = {
API_AP: {
API_ID: {
"1": {
API_MAC: "00:11:22:33:44:55",
API_DEVICE_NAME: "Test Device",
API_MODEL: "r510",
}
}
}
}
CONFIG = {
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
}
TEST_CLIENT_ENTITY_ID = "device_tracker.ruckus_test_device"
TEST_CLIENT = {
API_IP: "1.1.1.2",
API_MAC: "AA:BB:CC:DD:EE:FF",
API_NAME: "Ruckus Test Device",
API_ACCESS_POINT: "00:11:22:33:44:55",
}
def mock_config_entry() -> MockConfigEntry:
"""Return a Ruckus Unleashed mock config entry."""
return MockConfigEntry(
domain=DOMAIN,
title=DEFAULT_TITLE,
unique_id=DEFAULT_UNIQUE_ID,
data=CONFIG,
options=None,
)
async def init_integration(hass) -> MockConfigEntry:
"""Set up the Ruckus Unleashed integration in Home Assistant."""
entry = mock_config_entry()
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
return_value=None,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.mesh_name",
return_value=DEFAULT_TITLE,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.system_info",
return_value=DEFAULT_SYSTEM_INFO,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.ap_info",
return_value=DEFAULT_AP_INFO,
), patch(
"homeassistant.components.ruckus_unleashed.RuckusUnleashedDataUpdateCoordinator._fetch_clients",
return_value={
TEST_CLIENT[API_MAC]: TEST_CLIENT,
},
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
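
# Illustrative usage (hypothetical test, not part of this helper module): a
# device_tracker test could build on init_integration roughly like this:
#
#     async def test_client_tracked(hass):
#         await init_integration(hass)
#         assert hass.states.get(TEST_CLIENT_ENTITY_ID) is not None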
|
import secrets
from uuid import uuid4
from pysmartthings import (
CLASSIFICATION_AUTOMATION,
AppEntity,
AppOAuthClient,
AppSettings,
DeviceEntity,
DeviceStatus,
InstalledApp,
InstalledAppStatus,
InstalledAppType,
Location,
SceneEntity,
SmartThings,
Subscription,
)
from pysmartthings.api import Api
import pytest
from homeassistant.components import webhook
from homeassistant.components.smartthings import DeviceBroker
from homeassistant.components.smartthings.const import (
APP_NAME_PREFIX,
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_INSTANCE_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DOMAIN,
SETTINGS_INSTANCE_ID,
STORAGE_KEY,
STORAGE_VERSION,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.config_entries import CONN_CLASS_CLOUD_PUSH, SOURCE_USER, ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_WEBHOOK_ID,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa
COMPONENT_PREFIX = "homeassistant.components.smartthings."
async def setup_platform(hass, platform: str, *, devices=None, scenes=None):
"""Set up the SmartThings platform and prerequisites."""
hass.config.components.add(DOMAIN)
config_entry = ConfigEntry(
2,
DOMAIN,
"Test",
{CONF_INSTALLED_APP_ID: str(uuid4())},
SOURCE_USER,
CONN_CLASS_CLOUD_PUSH,
system_options={},
)
broker = DeviceBroker(
hass, config_entry, Mock(), Mock(), devices or [], scenes or []
)
hass.data[DOMAIN] = {DATA_BROKERS: {config_entry.entry_id: broker}}
await hass.config_entries.async_forward_entry_setup(config_entry, platform)
await hass.async_block_till_done()
return config_entry
@pytest.fixture(autouse=True)
async def setup_component(hass, config_file, hass_storage):
"""Load the SmartThing component."""
hass_storage[STORAGE_KEY] = {"data": config_file, "version": STORAGE_VERSION}
await async_process_ha_core_config(
hass,
{"external_url": "https://test.local"},
)
await async_setup_component(hass, "smartthings", {})
def _create_location():
loc = Mock(Location)
loc.name = "Test Location"
loc.location_id = str(uuid4())
return loc
@pytest.fixture(name="location")
def location_fixture():
"""Fixture for a single location."""
return _create_location()
@pytest.fixture(name="locations")
def locations_fixture(location):
"""Fixture for 2 locations."""
return [location, _create_location()]
@pytest.fixture(name="app")
async def app_fixture(hass, config_file):
"""Fixture for a single app."""
app = Mock(AppEntity)
app.app_name = APP_NAME_PREFIX + str(uuid4())
app.app_id = str(uuid4())
app.app_type = "WEBHOOK_SMART_APP"
app.classifications = [CLASSIFICATION_AUTOMATION]
app.display_name = "Home Assistant"
app.description = f"{hass.config.location_name} at https://test.local"
app.single_instance = True
app.webhook_target_url = webhook.async_generate_url(
hass, hass.data[DOMAIN][CONF_WEBHOOK_ID]
)
settings = Mock(AppSettings)
settings.app_id = app.app_id
settings.settings = {SETTINGS_INSTANCE_ID: config_file[CONF_INSTANCE_ID]}
app.settings.return_value = settings
return app
@pytest.fixture(name="app_oauth_client")
def app_oauth_client_fixture():
"""Fixture for a single app's oauth."""
client = Mock(AppOAuthClient)
client.client_id = str(uuid4())
client.client_secret = str(uuid4())
return client
@pytest.fixture(name="app_settings")
def app_settings_fixture(app, config_file):
"""Fixture for an app settings."""
settings = Mock(AppSettings)
settings.app_id = app.app_id
settings.settings = {SETTINGS_INSTANCE_ID: config_file[CONF_INSTANCE_ID]}
return settings
def _create_installed_app(location_id, app_id):
item = Mock(InstalledApp)
item.installed_app_id = str(uuid4())
item.installed_app_status = InstalledAppStatus.AUTHORIZED
item.installed_app_type = InstalledAppType.WEBHOOK_SMART_APP
item.app_id = app_id
item.location_id = location_id
return item
@pytest.fixture(name="installed_app")
def installed_app_fixture(location, app):
"""Fixture for a single installed app."""
return _create_installed_app(location.location_id, app.app_id)
@pytest.fixture(name="installed_apps")
def installed_apps_fixture(installed_app, locations, app):
"""Fixture for 2 installed apps."""
return [installed_app, _create_installed_app(locations[1].location_id, app.app_id)]
@pytest.fixture(name="config_file")
def config_file_fixture():
"""Fixture representing the local config file contents."""
return {CONF_INSTANCE_ID: str(uuid4()), CONF_WEBHOOK_ID: secrets.token_hex()}
@pytest.fixture(name="smartthings_mock")
def smartthings_mock_fixture(locations):
"""Fixture to mock smartthings API calls."""
async def _location(location_id):
return next(
location for location in locations if location.location_id == location_id
)
smartthings_mock = Mock(SmartThings)
smartthings_mock.location.side_effect = _location
mock = Mock(return_value=smartthings_mock)
with patch(COMPONENT_PREFIX + "SmartThings", new=mock), patch(
COMPONENT_PREFIX + "config_flow.SmartThings", new=mock
), patch(COMPONENT_PREFIX + "smartapp.SmartThings", new=mock):
yield smartthings_mock
@pytest.fixture(name="device")
def device_fixture(location):
"""Fixture representing devices loaded."""
item = Mock(DeviceEntity)
item.device_id = "743de49f-036f-4e9c-839a-2f89d57607db"
item.name = "GE In-Wall Smart Dimmer"
item.label = "Front Porch Lights"
item.location_id = location.location_id
item.capabilities = [
"switch",
"switchLevel",
"refresh",
"indicator",
"sensor",
"actuator",
"healthCheck",
"light",
]
item.components = {"main": item.capabilities}
item.status = Mock(DeviceStatus)
return item
@pytest.fixture(name="config_entry")
def config_entry_fixture(hass, installed_app, location):
"""Fixture representing a config entry."""
data = {
CONF_ACCESS_TOKEN: str(uuid4()),
CONF_INSTALLED_APP_ID: installed_app.installed_app_id,
CONF_APP_ID: installed_app.app_id,
CONF_LOCATION_ID: location.location_id,
CONF_REFRESH_TOKEN: str(uuid4()),
CONF_CLIENT_ID: str(uuid4()),
CONF_CLIENT_SECRET: str(uuid4()),
}
return MockConfigEntry(
domain=DOMAIN,
data=data,
title=location.name,
version=2,
source=SOURCE_USER,
connection_class=CONN_CLASS_CLOUD_PUSH,
)
@pytest.fixture(name="subscription_factory")
def subscription_factory_fixture():
"""Fixture for creating mock subscriptions."""
def _factory(capability):
sub = Subscription()
sub.capability = capability
return sub
return _factory
@pytest.fixture(name="device_factory")
def device_factory_fixture():
"""Fixture for creating mock devices."""
api = Mock(Api)
api.post_device_command.return_value = {"results": [{"status": "ACCEPTED"}]}
def _factory(label, capabilities, status: dict = None):
device_data = {
"deviceId": str(uuid4()),
"name": "Device Type Handler Name",
"label": label,
"deviceManufacturerCode": "9135fc86-0929-4436-bf73-5d75f523d9db",
"locationId": "fcd829e9-82f4-45b9-acfd-62fda029af80",
"components": [
{
"id": "main",
"capabilities": [
{"id": capability, "version": 1} for capability in capabilities
],
}
],
"dth": {
"deviceTypeId": "b678b29d-2726-4e4f-9c3f-7aa05bd08964",
"deviceTypeName": "Switch",
"deviceNetworkType": "ZWAVE",
},
"type": "DTH",
}
device = DeviceEntity(api, data=device_data)
if status:
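            # Seed the device status with the requested attribute values on the main component.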
for attribute, value in status.items():
device.status.apply_attribute_update("main", "", attribute, value)
return device
return _factory
@pytest.fixture(name="scene_factory")
def scene_factory_fixture(location):
"""Fixture for creating mock devices."""
def _factory(name):
scene = Mock(SceneEntity)
scene.scene_id = str(uuid4())
scene.name = name
scene.location_id = location.location_id
return scene
return _factory
@pytest.fixture(name="scene")
def scene_fixture(scene_factory):
"""Fixture for an individual scene."""
return scene_factory("Test Scene")
@pytest.fixture(name="event_factory")
def event_factory_fixture():
"""Fixture for creating mock devices."""
def _factory(
device_id,
event_type="DEVICE_EVENT",
capability="",
attribute="Updated",
value="Value",
data=None,
):
event = Mock()
event.event_type = event_type
event.device_id = device_id
event.component_id = "main"
event.capability = capability
event.attribute = attribute
event.value = value
event.data = data
event.location_id = str(uuid4())
return event
return _factory
@pytest.fixture(name="event_request_factory")
def event_request_factory_fixture(event_factory):
"""Fixture for creating mock smartapp event requests."""
def _factory(device_ids=None, events=None):
request = Mock()
request.installed_app_id = uuid4()
if events is None:
events = []
if device_ids:
events.extend([event_factory(id) for id in device_ids])
events.append(event_factory(uuid4()))
events.append(event_factory(device_ids[0], event_type="OTHER"))
request.events = events
return request
return _factory
|
import spacy
from gensim.models import word2vec
from scattertext import whitespace_nlp_with_sentences
from scattertext import SampleCorpora, word_similarity_explorer_gensim, Word2VecFromParsedCorpus
from scattertext.CorpusFromParsedDocuments import CorpusFromParsedDocuments
from scattertext.termsignificance.ScaledFScoreSignificance import ScaledFScoreSignificance
def main():
nlp = spacy.load('en')
#nlp = whitespace_nlp_with_sentences
convention_df = SampleCorpora.ConventionData2012.get_data()
convention_df['parsed'] = convention_df.text.apply(nlp)
corpus = (CorpusFromParsedDocuments(convention_df,
category_col='party',
parsed_col='parsed')
.build()
.get_unigram_corpus())
model = word2vec.Word2Vec(size=100,
alpha=0.025,
window=5,
min_count=5,
max_vocab_size=None,
sample=0,
seed=1,
workers=1,
min_alpha=0.0001,
sg=1,
hs=1,
negative=0,
cbow_mean=0,
iter=10,
null_word=0,
trim_rule=None,
sorted_vocab=1)
html = word_similarity_explorer_gensim(corpus,
category='democrat',
target_term='jobs',
category_name='Democratic',
not_category_name='Republican',
minimum_term_frequency=5,
width_in_pixels=1000,
metadata=convention_df['speaker'],
word2vec=Word2VecFromParsedCorpus(corpus, model).train(),
term_significance=ScaledFScoreSignificance(),
max_p_val=0.05,
save_svg_button=True,
d3_url='scattertext/data/viz/scripts/d3.min.js',
d3_scale_chromatic_url='scattertext/data/viz/scripts/d3-scale-chromatic.v1.min.js')
open('./demo_gensim_similarity.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_gensim_similarity.html in Chrome or Firefox.')
if __name__ == '__main__':
main()
|
import os
import re
from io import BytesIO, StringIO
from zipfile import BadZipFile
from django.utils.translation import gettext_lazy as _
from openpyxl import Workbook, load_workbook
from openpyxl.cell.cell import TYPE_STRING
from translate.storage.csvl10n import csv
from weblate.formats.helpers import BytesIOMode
from weblate.formats.ttkit import CSVFormat
# use the same regular expression as in openpyxl.cell
# to remove illegal characters
ILLEGAL_CHARACTERS_RE = re.compile(r"[\000-\010]|[\013-\014]|[\016-\037]")
class XlsxFormat(CSVFormat):
name = _("Excel Open XML")
format_id = "xlsx"
autoload = ("*.xlsx",)
def save_content(self, handle):
workbook = Workbook()
worksheet = workbook.active
worksheet.title = self.store.targetlanguage or "Weblate"
# write headers
for column, field in enumerate(self.store.fieldnames):
worksheet.cell(column=1 + column, row=1, value=field)
for row, unit in enumerate(self.store.units):
data = unit.todict()
for column, field in enumerate(self.store.fieldnames):
cell = worksheet.cell(column=1 + column, row=2 + row)
cell.data_type = TYPE_STRING
cell.value = ILLEGAL_CHARACTERS_RE.sub("", data[field])
workbook.save(handle)
@staticmethod
def serialize(store):
# store is CSV (csvfile) here
output = BytesIO()
XlsxFormat(store).save_content(output)
return output.getvalue()
@classmethod
def parse_store(cls, storefile):
# try to load the given file via openpyxl
# catch at least the BadZipFile exception if an unsupported
# file has been given
try:
workbook = load_workbook(filename=storefile)
worksheet = workbook.active
except BadZipFile:
return None, None
output = StringIO()
writer = csv.writer(output, dialect="unix")
for row in worksheet.rows:
writer.writerow([cell.value for cell in row])
if isinstance(storefile, str):
name = os.path.basename(storefile) + ".csv"
else:
name = os.path.basename(storefile.name) + ".csv"
# return the new csv as bytes
content = output.getvalue().encode()
# Load the file as CSV
return super().parse_store(BytesIOMode(name, content))
@staticmethod
def mimetype():
"""Return most common mime type for format."""
return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "xlsx"
@classmethod
def create_new_file(cls, filename, language, base):
"""Handle creation of new translation file."""
if not base:
raise ValueError("Not supported")
# Parse file
store = cls.parse_store(base)
cls.untranslate_store(store, language)
with open(filename, "wb") as handle:
XlsxFormat(store).save_content(handle)
|
from django.db import transaction
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.renderers import BrowsableAPIRenderer
from shop.models.address import ShippingAddressModel, BillingAddressModel
from shop.models.cart import CartModel
from shop.rest.money import JSONRenderer
class AddressEditView(GenericAPIView):
"""
View class to render associated addresses for the current user.
"""
renderer_classes = (JSONRenderer, BrowsableAPIRenderer)
permission_classes = (IsAuthenticated,)
    form_class = None  # must be overridden
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.visible_fields = [f.name for f in self.form_class().visible_fields()]
def get(self, request, priority=None, *args, **kwargs):
if priority == 'add':
# deliver an empty form
form = self.form_class()
else:
try:
if self.form_class.__name__ == 'BillingAddressForm':
instance = request.customer.billingaddress_set.get(priority=priority)
elif self.form_class.__name__ == 'ShippingAddressForm':
instance = request.customer.shippingaddress_set.get(priority=priority)
else:
raise CartModel.DoesNotExist()
except (ShippingAddressModel.DoesNotExist, BillingAddressModel.DoesNotExist):
return Response(status=status.HTTP_410_GONE)
else:
cart = CartModel.objects.get_from_request(request)
form = self.form_class(instance=instance, cart=cart)
initial_data = dict((k, v) for k, v in form.get_initial_data().items() if k in self.visible_fields)
initial_data.pop('use_primary_address', None)
return Response({self.form_class.form_name: initial_data})
def put(self, request, *args, **kwargs):
data = request.data.get(self.form_class.scope_prefix, {})
cart = CartModel.objects.get_from_request(request)
form = self.form_class(data=data, cart=cart)
if form.is_valid():
return Response()
response_data = {self.form_class.form_name: dict(errors=form.errors)}
return Response(response_data, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
def delete(self, request, priority=None, *args, **kwargs):
cart = CartModel.objects.get_from_request(request)
with transaction.atomic():
try:
if self.form_class.__name__ == 'BillingAddressForm':
request.customer.billingaddress_set.get(priority=priority).delete()
elif self.form_class.__name__ == 'ShippingAddressForm':
request.customer.shippingaddress_set.get(priority=priority).delete()
except (ValueError, ShippingAddressModel.DoesNotExist, BillingAddressModel.DoesNotExist):
pass
# take the last of the remaining addresses
if self.form_class.__name__ == 'BillingAddressForm':
instance = request.customer.billingaddress_set.last()
cart.billing_address = instance
elif self.form_class.__name__ == 'ShippingAddressForm':
instance = request.customer.shippingaddress_set.last()
cart.shipping_address = instance
cart.save()
# repopulate the form with address fields from the last remaining address
if instance:
form = self.form_class(instance=instance, cart=cart)
initial_data = dict((k, v) for k, v in form.get_initial_data().items() if k in self.visible_fields)
initial_data.update(active_priority=str(instance.priority), siblings_summary=form.siblings_summary)
return Response({self.form_class.form_name: initial_data})
return Response(status=status.HTTP_410_GONE)
|
import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import node_linalg
from tensornetwork.network_components import Node
from tensornetwork.backend_contextmanager import DefaultBackend
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pylint: disable=no-member
config.update("jax_enable_x64", True)
np_real = [np.float32, np.float16, np.float64]
np_float = np_real + [np.complex64, np.complex128]
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_dtypes = {
"real": np_real,
"float": np_float,
"rand": np_float,
"int": np_int + np_uint,
"all": np_real + np_int + np_uint + [
None,
]
}
tf_real = [tf.float32, tf.float16, tf.float64]
tf_float = tf_real + [tf.complex64, tf.complex128]
tf_int = [tf.int8, tf.int16, tf.int32, tf.int64]
tf_uint = [tf.uint8, tf.uint16, tf.uint32, tf.uint64]
tf_dtypes = {
"real": tf_real,
"float": tf_float,
"rand": tf_real + [
None,
],
"int": tf_int + tf_uint,
"all": tf_real + tf_int + tf_uint + [
None,
]
}
torch_float = [torch.float32, torch.float16, torch.float64]
torch_int = [torch.int8, torch.int16, torch.int32, torch.int64]
torch_uint = [torch.uint8]
torch_dtypes = {
"real": torch_float,
"float": torch_float,
"rand": [torch.float32, torch.float64, None],
"int": torch_int + torch_uint,
"all": torch_float + torch_int + torch_uint + [
None,
]
}
dtypes = {
"pytorch": torch_dtypes,
"jax": np_dtypes,
"numpy": np_dtypes,
"tensorflow": tf_dtypes
}
def test_eye(backend):
"""
Tests node_linalg.eye against np.eye.
"""
N = 4
M = 6
name = "Jeffrey"
axis_names = ["Sam", "Blinkey"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.eye(
N, dtype=dtype, M=M, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.eye(N, dtype=dtype, M=M)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_zeros(backend):
"""
Tests node_linalg.zeros against np.zeros.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.zeros(
shape, dtype=dtype, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.zeros(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_ones(backend):
"""
Tests node_linalg.ones against np.ones.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.ones(
shape, dtype=dtype, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.ones(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_randn(backend):
"""
Tests node_linalg.randn against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = node_linalg.randn(
shape,
dtype=dtype,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed)
npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_random_uniform(backend):
"""
  Tests node_linalg.random_uniform against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
boundaries = (-0.3, 10.5)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = node_linalg.random_uniform(
shape,
dtype=dtype,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
boundaries=boundaries)
npI = backend_obj.random_uniform(
shape, dtype=dtype, seed=seed, boundaries=boundaries)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_conj(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
a = Node(np.random.rand(3, 3) + 1j * np.random.rand(3, 3), backend=backend)
abar = node_linalg.conj(a)
np.testing.assert_allclose(abar.tensor, a.backend.conj(a.tensor))
def test_transpose(backend):
a = Node(np.random.rand(1, 2, 3, 4, 5), backend=backend)
order = [a[n] for n in reversed(range(5))]
transpa = node_linalg.transpose(a, [4, 3, 2, 1, 0])
a.reorder_edges(order)
np.testing.assert_allclose(a.tensor, transpa.tensor)
def test_operator_kron(backend):
with DefaultBackend(backend):
X = np.array([[0, 1], [1, 0]], dtype=np.float32)
Z = np.array([[1, 0], [0, -1]], dtype=np.float32)
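    # kron of two 2x2 operators is 4x4; reshape to rank-4 to match the node's edge layout.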
expected = np.kron(X, Z).reshape(2, 2, 2, 2)
result = node_linalg.kron([Node(X), Node(Z)])
np.testing.assert_allclose(result.tensor, expected)
def test_kron_raises(backend):
with DefaultBackend(backend):
A = Node(np.ones((2, 2, 2)))
B = Node(np.ones((2, 2, 2)))
with pytest.raises(
ValueError, match="All operator tensors must have an even order."):
node_linalg.kron([A, B])
def test_norm_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
node_linalg.norm(node)
def test_conj_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
node_linalg.conj(node)
def test_transpose_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
node_linalg.transpose(node, permutation=[])
|
import sys
import types
import collections
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["dis", "disassemble", "distb", "disco",
"findlinestarts", "findlabels"] + _opcodes_all
del _opcodes_all
def dis(x=None):
for disline in disgen(x):
if disline.first and disline.offset > 0:
print()
print(format_dis_line(disline))
def format_dis_line(disline):
if disline.first:
lineno = "%3d" % disline.lineno
else:
lineno = " "
if disline.target:
label = ">>"
else:
label = " "
if disline.oparg is not None:
oparg = repr(disline.oparg)
else:
oparg = ""
return "%s %s %4r %-20s %5s %s" % (lineno, label, disline.offset, disline.opcode, oparg, disline.argstr)
def disgen(x=None):
"""Disassemble methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
return distb()
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, 'co_code'):
return disassemble(x)
else:
raise TypeError(
"don't know how to disassemble %s objects" %
type(x).__name__
)
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError("no last traceback to disassemble")
while tb.tb_next:
tb = tb.tb_next
return disassemble(tb.tb_frame.f_code, tb.tb_lasti)
DisLine = collections.namedtuple(
'DisLine',
"lineno first target offset opcode oparg argstr"
)
def disassemble(co, lasti=-1):
"""Disassemble a code object."""
code = co.co_code
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
dislines = []
lineno = linestarts[0]
while i < n:
op = byte_from_code(code, i)
first = i in linestarts
if first:
lineno = linestarts[i]
#if i == lasti: print '-->',
#else: print ' ',
target = i in labels
offset = i
opcode = opname[op]
i = i+1
if op >= HAVE_ARGUMENT:
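            # Pre-3.6 bytecode: the argument is the two bytes after the opcode (little-endian);
            # EXTENDED_ARG supplies the high-order bits of the next instruction's argument.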
oparg = byte_from_code(code, i) + byte_from_code(code, i+1)*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536
if op in hasconst:
argstr = '(' + repr(co.co_consts[oparg]) + ')'
elif op in hasname:
argstr = '(' + co.co_names[oparg] + ')'
elif op in hasjabs:
argstr = '(-> ' + repr(oparg) + ')'
elif op in hasjrel:
argstr = '(-> ' + repr(i + oparg) + ')'
elif op in haslocal:
argstr = '(' + co.co_varnames[oparg] + ')'
elif op in hascompare:
argstr = '(' + cmp_op[oparg] + ')'
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
argstr = '(' + free[oparg] + ')'
else:
argstr = ""
else:
oparg = None
argstr = ""
yield DisLine(lineno=lineno, first=first, target=target, offset=offset, opcode=opcode, oparg=oparg, argstr=argstr)
def byte_from_code(code, i):
byte = code[i]
if not isinstance(byte, int):
byte = ord(byte)
return byte
def findlabels(code):
"""Detect all offsets in a bytecode which are jump targets.
Return the list of offsets.
"""
labels = []
n = len(code)
i = 0
while i < n:
op = byte_from_code(code, i)
i = i+1
if op >= HAVE_ARGUMENT:
oparg = byte_from_code(code, i) + byte_from_code(code, i+1)*256
i = i+2
label = -1
if op in hasjrel:
label = i+oparg
elif op in hasjabs:
label = oparg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
def findlinestarts(code):
"""Find the offsets in a bytecode which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [byte_from_code(code.co_lnotab, i) for i in range(0, len(code.co_lnotab), 2)]
line_increments = [byte_from_code(code.co_lnotab, i) for i in range(1, len(code.co_lnotab), 2)]
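    # co_lnotab packs alternating (bytecode delta, line delta) bytes; replay them to recover line starts.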
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
def _test():
"""Simple test program to disassemble a file."""
if sys.argv[1:]:
if sys.argv[2:]:
sys.stderr.write("usage: python dis.py [-|file]\n")
sys.exit(2)
fn = sys.argv[1]
if not fn or fn == "-":
fn = None
else:
fn = None
if fn is None:
f = sys.stdin
else:
f = open(fn)
source = f.read()
if fn is not None:
f.close()
else:
fn = "<stdin>"
code = compile(source, fn, "exec")
dis(code)
if __name__ == "__main__":
_test()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import memcached_server
from perfkitbenchmarker.linux_packages import mutilate
FLAGS = flags.FLAGS
flags.DEFINE_string('memcached_mutilate_server_machine_type', None,
'Machine type to use for the memcached server if different '
'from memcached client machine type.')
flags.DEFINE_string('memcached_mutilate_client_machine_type', None,
'Machine type to use for the mutilate client if different '
'from memcached server machine type.')
flags.DEFINE_integer('memcached_mutilate_num_client_vms', 1,
'Number of mutilate client machines to use.')
flags.DEFINE_boolean('set_smp_affinity', False,
'Manually set smp affinity.')
BENCHMARK_NAME = 'memcached_mutilate'
BENCHMARK_CONFIG = """
memcached_mutilate:
description: Run mutilate against a memcached installation.
vm_groups:
server:
vm_spec: *default_single_core
vm_count: 1
client:
vm_spec: *default_dual_core
vm_count: 1
"""
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: The benchmark configuration.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
del benchmark_config
mutilate.CheckPrerequisites()
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.memcached_mutilate_client_machine_type:
vm_spec = config['vm_groups']['client']['vm_spec']
for cloud in vm_spec:
vm_spec[cloud]['machine_type'] = (
FLAGS.memcached_mutilate_client_machine_type)
if FLAGS.memcached_mutilate_server_machine_type:
vm_spec = config['vm_groups']['server']['vm_spec']
for cloud in vm_spec:
vm_spec[cloud]['machine_type'] = (
FLAGS.memcached_mutilate_server_machine_type)
if FLAGS['memcached_mutilate_num_client_vms'].present:
config['vm_groups']['client']['vm_count'] = (
FLAGS.memcached_mutilate_num_client_vms)
return config
def _InstallMutilate(vm):
vm.Install('mutilate')
def _InstallMemcached(vm):
vm.Install('memcached_server')
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run mutilate against memcached.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['client']
master = clients[0]
server = benchmark_spec.vm_groups['server'][0]
client_install_fns = [
functools.partial(vm.Install, 'mutilate') for vm in clients]
server_install_fns = [functools.partial(server.Install, 'memcached_server')]
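  # Install mutilate on the clients and memcached on the server in parallel.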
vm_util.RunThreaded(lambda f: f(), client_install_fns + server_install_fns)
memcached_server.ConfigureAndStart(
server, smp_affinity=FLAGS.set_smp_affinity)
mutilate.Load(master, server.internal_ip, memcached_server.MEMCACHED_PORT)
def Run(benchmark_spec):
"""Runs mutilate against memcached and gathers the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
clients = benchmark_spec.vm_groups['client']
server = benchmark_spec.vm_groups['server'][0]
server_ip = server.internal_ip
metadata = {'memcached_version': memcached_server.GetVersion(server),
'memcached_server_size': FLAGS.memcached_size_mb,
'memcached_server_threads': FLAGS.memcached_num_threads,
'smp_affinity': FLAGS.set_smp_affinity}
samples = mutilate.Run(
clients, server_ip, memcached_server.MEMCACHED_PORT)
for sample in samples:
sample.metadata.update(metadata)
return samples
def Cleanup(unused_benchmark_spec):
pass
|
import logging
from threading import Lock
from scsgate.connection import Connection
from scsgate.messages import ScenarioTriggeredMessage, StateMessage
from scsgate.reactor import Reactor
from scsgate.tasks import GetStatusTask
import voluptuous as vol
from homeassistant.const import CONF_DEVICE, CONF_NAME
from homeassistant.core import EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_SCS_ID = "scs_id"
DOMAIN = "scsgate"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DEVICE): cv.string})}, extra=vol.ALLOW_EXTRA
)
SCSGATE_SCHEMA = vol.Schema(
{vol.Required(CONF_SCS_ID): cv.string, vol.Optional(CONF_NAME): cv.string}
)
def setup(hass, config):
"""Set up the SCSGate component."""
device = config[DOMAIN][CONF_DEVICE]
scsgate = None
try:
scsgate = SCSGate(device=device, logger=_LOGGER)
scsgate.start()
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Cannot setup SCSGate component: %s", exception)
return False
def stop_monitor(event):
"""Stop the SCSGate."""
_LOGGER.info("Stopping SCSGate monitor thread")
scsgate.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_monitor)
hass.data[DOMAIN] = scsgate
return True
class SCSGate:
"""The class for dealing with the SCSGate device via scsgate.Reactor."""
def __init__(self, device, logger):
"""Initialize the SCSGate."""
self._logger = logger
self._devices = {}
self._devices_to_register = {}
self._devices_to_register_lock = Lock()
self._device_being_registered = None
self._device_being_registered_lock = Lock()
connection = Connection(device=device, logger=self._logger)
self._reactor = Reactor(
connection=connection,
logger=self._logger,
handle_message=self.handle_message,
)
def handle_message(self, message):
"""Handle a messages seen on the bus."""
self._logger.debug(f"Received message {message}")
if not isinstance(message, StateMessage) and not isinstance(
message, ScenarioTriggeredMessage
):
msg = f"Ignored message {message} - not relevant type"
self._logger.debug(msg)
return
if message.entity in self._devices:
new_device_activated = False
with self._devices_to_register_lock:
if message.entity == self._device_being_registered:
self._device_being_registered = None
new_device_activated = True
if new_device_activated:
self._activate_next_device()
try:
self._devices[message.entity].process_event(message)
except Exception as exception: # pylint: disable=broad-except
msg = f"Exception while processing event: {exception}"
self._logger.error(msg)
else:
self._logger.info(
"Ignoring state message for device {} because unknown".format(
message.entity
)
)
@property
def devices(self):
"""Return a dictionary with known devices.
Key is device ID, value is the device itself.
"""
return self._devices
def add_device(self, device):
"""Add the specified device.
        The internal list contains the already registered devices.
Beware: this is not what you usually want to do, take a look at
`add_devices_to_register`
"""
self._devices[device.scs_id] = device
def add_devices_to_register(self, devices):
"""List of devices to be registered."""
with self._devices_to_register_lock:
for device in devices:
self._devices_to_register[device.scs_id] = device
self._activate_next_device()
def _activate_next_device(self):
"""Start the activation of the first device."""
with self._devices_to_register_lock:
while self._devices_to_register:
_, device = self._devices_to_register.popitem()
self._devices[device.scs_id] = device
self._device_being_registered = device.scs_id
self._reactor.append_task(GetStatusTask(target=device.scs_id))
def is_device_registered(self, device_id):
"""Check whether a device is already registered or not."""
with self._devices_to_register_lock:
if device_id in self._devices_to_register:
return False
with self._device_being_registered_lock:
if device_id == self._device_being_registered:
return False
return True
def start(self):
"""Start the scsgate.Reactor."""
self._reactor.start()
def stop(self):
"""Stop the scsgate.Reactor."""
self._reactor.stop()
def append_task(self, task):
"""Register a new task to be executed."""
self._reactor.append_task(task)
|
import copy
import json
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.components.tasmota.discovery import ALREADY_DISCOVERED
from .conftest import setup_tasmota_helper
from .test_common import DEFAULT_CONFIG
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message
async def test_subscribing_config_topic(hass, mqtt_mock, setup_tasmota):
"""Test setting up discovery."""
discovery_topic = DEFAULT_PREFIX
assert mqtt_mock.async_subscribe.called
call_args = mqtt_mock.async_subscribe.mock_calls[0][1]
assert call_args[0] == discovery_topic + "/#"
assert call_args[2] == 0
async def test_valid_discovery_message(hass, mqtt_mock, caplog):
"""Test discovery callback called."""
config = copy.deepcopy(DEFAULT_CONFIG)
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config",
return_value={},
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/00000049A3BC/config", json.dumps(config)
)
await hass.async_block_till_done()
assert mock_tasmota_get_device_config.called
async def test_invalid_topic(hass, mqtt_mock):
"""Test receiving discovery message on wrong topic."""
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/123456/configuration", "{}")
await hass.async_block_till_done()
assert not mock_tasmota_get_device_config.called
async def test_invalid_message(hass, mqtt_mock, caplog):
"""Test receiving an invalid message."""
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/123456/config", "asd")
await hass.async_block_till_done()
assert "Invalid discovery message" in caplog.text
assert not mock_tasmota_get_device_config.called
async def test_invalid_mac(hass, mqtt_mock, caplog):
"""Test topic is not matching device MAC."""
config = copy.deepcopy(DEFAULT_CONFIG)
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/00000049A3BA/config", json.dumps(config)
)
await hass.async_block_till_done()
assert "MAC mismatch" in caplog.text
assert not mock_tasmota_get_device_config.called
async def test_correct_config_discovery(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test receiving valid discovery message."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
entity_entry = entity_reg.async_get("switch.test")
assert entity_entry is not None
state = hass.states.get("switch.test")
assert state is not None
assert state.name == "Test"
assert (mac, "switch", "relay", 0) in hass.data[ALREADY_DISCOVERED]
async def test_device_discover(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test setting up a device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
assert device_entry.manufacturer == "Tasmota"
assert device_entry.model == config["md"]
assert device_entry.name == config["dn"]
assert device_entry.sw_version == config["sw"]
async def test_device_update(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test updating a device."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["md"] = "Model 1"
config["dn"] = "Name 1"
config["sw"] = "v1.2.3.4"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
# Update device parameters
config["md"] = "Another model"
config["dn"] = "Another name"
config["sw"] = "v6.6.6"
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is updated
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
assert device_entry.model == "Another model"
assert device_entry.name == "Another name"
assert device_entry.sw_version == "v6.6.6"
async def test_device_remove(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test removing a discovered device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
"",
)
await hass.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is None
async def test_device_remove_stale(hass, mqtt_mock, caplog, device_reg, setup_tasmota):
"""Test removing a stale (undiscovered) device does not throw."""
mac = "00000049A3BC"
config_entry = hass.config_entries.async_entries("tasmota")[0]
# Create a device
device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", mac)},
)
# Verify device entry was created
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
# Remove the device
device_reg.async_remove_device(device_entry.id)
# Verify device entry is removed
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is None
async def test_device_rediscover(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test removing a device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry1 = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry1 is not None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
"",
)
await hass.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created, and id is reused
device_entry = device_reg.async_get_device(set(), {("mac", mac)})
assert device_entry is not None
assert device_entry1.id == device_entry.id
async def test_entity_duplicate_discovery(hass, mqtt_mock, caplog, setup_tasmota):
"""Test entities are not duplicated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
state_duplicate = hass.states.get("binary_sensor.beer1")
assert state is not None
assert state.name == "Test"
assert state_duplicate is None
assert (
f"Entity already added, sending update: switch ('{mac}', 'switch', 'relay', 0)"
in caplog.text
)
async def test_entity_duplicate_removal(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removing entity twice."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
config["rl"][0] = 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
assert f"Removing entity: switch ('{mac}', 'switch', 'relay', 0)" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
assert "Removing entity: switch" not in caplog.text
|
import logging
# pylint: disable=import-error
from beacontools import BeaconScanner, EddystoneFilter, EddystoneTLMFrame
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_BEACONS = "beacons"
CONF_BT_DEVICE_ID = "bt_device_id"
CONF_INSTANCE = "instance"
CONF_NAMESPACE = "namespace"
BEACON_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAMESPACE): cv.string,
vol.Required(CONF_INSTANCE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int,
vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Validate configuration, create devices and start monitoring thread."""
bt_device_id = config.get("bt_device_id")
beacons = config.get(CONF_BEACONS)
devices = []
for dev_name, properties in beacons.items():
namespace = get_from_conf(properties, CONF_NAMESPACE, 20)
instance = get_from_conf(properties, CONF_INSTANCE, 12)
name = properties.get(CONF_NAME, dev_name)
if instance is None or namespace is None:
_LOGGER.error("Skipping %s", dev_name)
continue
devices.append(EddystoneTemp(name, namespace, instance))
if devices:
mon = Monitor(hass, devices, bt_device_id)
def monitor_stop(_service_or_event):
"""Stop the monitor thread."""
_LOGGER.info("Stopping scanner for Eddystone beacons")
mon.stop()
def monitor_start(_service_or_event):
"""Start the monitor thread."""
_LOGGER.info("Starting scanner for Eddystone beacons")
mon.start()
add_entities(devices)
mon.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)
else:
_LOGGER.warning("No devices were added")
def get_from_conf(config, config_key, length):
"""Retrieve value from config and validate length."""
string = config.get(config_key)
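    # Namespace and instance are hex strings, so the expected byte count is half the character count.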
if len(string) != length:
_LOGGER.error(
"Error in configuration parameter %s: Must be exactly %d "
"bytes. Device will not be added",
config_key,
length / 2,
)
return None
return string
class EddystoneTemp(Entity):
"""Representation of a temperature sensor."""
def __init__(self, name, namespace, instance):
"""Initialize a sensor."""
self._name = name
self.namespace = namespace
self.instance = instance
self.bt_addr = None
self.temperature = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self.temperature
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return TEMP_CELSIUS
@property
def should_poll(self):
"""Return the polling state."""
return False
class Monitor:
"""Continuously scan for BLE advertisements."""
def __init__(self, hass, devices, bt_device_id):
"""Construct interface object."""
self.hass = hass
# List of beacons to monitor
self.devices = devices
# Number of the bt device (hciX)
self.bt_device_id = bt_device_id
def callback(bt_addr, _, packet, additional_info):
"""Handle new packets."""
self.process_packet(
additional_info["namespace"],
additional_info["instance"],
packet.temperature,
)
device_filters = [EddystoneFilter(d.namespace, d.instance) for d in devices]
self.scanner = BeaconScanner(
callback, bt_device_id, device_filters, EddystoneTLMFrame
)
self.scanning = False
def start(self):
"""Continuously scan for BLE advertisements."""
if not self.scanning:
self.scanner.start()
self.scanning = True
else:
_LOGGER.debug("start() called, but scanner is already running")
def process_packet(self, namespace, instance, temperature):
"""Assign temperature to device."""
_LOGGER.debug(
"Received temperature for <%s,%s>: %d", namespace, instance, temperature
)
for dev in self.devices:
if dev.namespace == namespace and dev.instance == instance:
if dev.temperature != temperature:
dev.temperature = temperature
dev.schedule_update_ha_state()
def stop(self):
"""Signal runner to stop and join thread."""
if self.scanning:
_LOGGER.debug("Stopping...")
self.scanner.stop()
_LOGGER.debug("Stopped")
self.scanning = False
else:
_LOGGER.debug("stop() called but scanner was not running")
|
import numpy as np
from PIL import Image
import tempfile
import unittest
from chainer import testing
from chainercv.utils import write_image
@testing.parameterize(*testing.product({
'file_obj': [False, True],
'format': ['bmp', 'jpeg', 'png'],
'size': [(48, 32)],
'color': [True, False],
'dtype': [np.float32, np.uint8, bool],
}))
class TestWriteImage(unittest.TestCase):
def setUp(self):
if self.file_obj:
self.f = tempfile.TemporaryFile()
self.file = self.f
else:
if self.format == 'jpeg':
suffix = '.jpg'
else:
suffix = '.' + self.format
self.f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
self.file = self.f.name
if self.color:
shape = (3,) + self.size
else:
shape = (1,) + self.size
self.img = np.random.randint(0, 255, size=shape).astype(self.dtype)
def test_write_image(self):
if self.file_obj:
write_image(self.img, self.file, format=self.format)
self.file.seek(0)
else:
write_image(self.img, self.file)
img = Image.open(self.file)
W, H = img.size
self.assertEqual((H, W), self.size)
if self.color:
self.assertEqual(len(img.getbands()), 3)
else:
self.assertEqual(len(img.getbands()), 1)
if self.format in {'bmp', 'png'}:
img = np.asarray(img)
if img.ndim == 2:
# reshape (H, W) -> (1, H, W)
img = img[np.newaxis]
else:
# transpose (H, W, C) -> (C, H, W)
img = img.transpose((2, 0, 1))
np.testing.assert_equal(img, self.img)
testing.run_module(__name__, __file__)
|
from functools import partial
import logging
from asterisk_mbox import ServerError
from homeassistant.components.mailbox import CONTENT_TYPE_MPEG, Mailbox, StreamError
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as ASTERISK_DOMAIN
_LOGGER = logging.getLogger(__name__)
SIGNAL_MESSAGE_REQUEST = "asterisk_mbox.message_request"
SIGNAL_MESSAGE_UPDATE = "asterisk_mbox.message_updated"
async def async_get_handler(hass, config, discovery_info=None):
"""Set up the Asterix VM platform."""
return AsteriskMailbox(hass, ASTERISK_DOMAIN)
class AsteriskMailbox(Mailbox):
"""Asterisk VM Sensor."""
def __init__(self, hass, name):
"""Initialize Asterisk mailbox."""
super().__init__(hass, name)
async_dispatcher_connect(
self.hass, SIGNAL_MESSAGE_UPDATE, self._update_callback
)
@callback
def _update_callback(self, msg):
"""Update the message count in HA, if needed."""
self.async_update()
@property
def media_type(self):
"""Return the supported media type."""
return CONTENT_TYPE_MPEG
@property
def can_delete(self):
"""Return if messages can be deleted."""
return True
@property
def has_media(self):
"""Return if messages have attached media files."""
return True
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
client = self.hass.data[ASTERISK_DOMAIN].client
try:
return await self.hass.async_add_executor_job(
partial(client.mp3, msgid, sync=True)
)
except ServerError as err:
raise StreamError(err) from err
async def async_get_messages(self):
"""Return a list of the current messages."""
return self.hass.data[ASTERISK_DOMAIN].messages
async def async_delete(self, msgid):
"""Delete the specified messages."""
client = self.hass.data[ASTERISK_DOMAIN].client
_LOGGER.info("Deleting: %s", msgid)
await self.hass.async_add_executor_job(client.delete, msgid)
return True
|
from homeassistant.components.media_player import BrowseError, BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_GENRE,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_TRACK,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_GENRE,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
)
LIBRARY = ["Artists", "Albums", "Tracks", "Playlists", "Genres"]
MEDIA_TYPE_TO_SQUEEZEBOX = {
"Artists": "artists",
"Albums": "albums",
"Tracks": "titles",
"Playlists": "playlists",
"Genres": "genres",
MEDIA_TYPE_ALBUM: "album",
MEDIA_TYPE_ARTIST: "artist",
MEDIA_TYPE_TRACK: "title",
MEDIA_TYPE_PLAYLIST: "playlist",
MEDIA_TYPE_GENRE: "genre",
}
SQUEEZEBOX_ID_BY_TYPE = {
MEDIA_TYPE_ALBUM: "album_id",
MEDIA_TYPE_ARTIST: "artist_id",
MEDIA_TYPE_TRACK: "track_id",
MEDIA_TYPE_PLAYLIST: "playlist_id",
MEDIA_TYPE_GENRE: "genre_id",
}
CONTENT_TYPE_MEDIA_CLASS = {
"Artists": {"item": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_ARTIST},
"Albums": {"item": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_ALBUM},
"Tracks": {"item": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_TRACK},
"Playlists": {"item": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_PLAYLIST},
"Genres": {"item": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_GENRE},
MEDIA_TYPE_ALBUM: {"item": MEDIA_CLASS_ALBUM, "children": MEDIA_CLASS_TRACK},
MEDIA_TYPE_ARTIST: {"item": MEDIA_CLASS_ARTIST, "children": MEDIA_CLASS_ALBUM},
MEDIA_TYPE_TRACK: {"item": MEDIA_CLASS_TRACK, "children": None},
MEDIA_TYPE_GENRE: {"item": MEDIA_CLASS_GENRE, "children": MEDIA_CLASS_ARTIST},
MEDIA_TYPE_PLAYLIST: {
"item": MEDIA_CLASS_PLAYLIST,
"children": MEDIA_CLASS_TRACK,
},
}
CONTENT_TYPE_TO_CHILD_TYPE = {
MEDIA_TYPE_ALBUM: MEDIA_TYPE_TRACK,
MEDIA_TYPE_PLAYLIST: MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_ARTIST: MEDIA_TYPE_ALBUM,
MEDIA_TYPE_GENRE: MEDIA_TYPE_ARTIST,
"Artists": MEDIA_TYPE_ARTIST,
"Albums": MEDIA_TYPE_ALBUM,
"Tracks": MEDIA_TYPE_TRACK,
"Playlists": MEDIA_TYPE_PLAYLIST,
"Genres": MEDIA_TYPE_GENRE,
}
BROWSE_LIMIT = 1000
async def build_item_response(player, payload):
"""Create response payload for search described by payload."""
search_id = payload["search_id"]
search_type = payload["search_type"]
media_class = CONTENT_TYPE_MEDIA_CLASS[search_type]
if search_id and search_id != search_type:
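        # Browsing within a specific item (album, artist, ...): pass its id to the library query.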
browse_id = (SQUEEZEBOX_ID_BY_TYPE[search_type], search_id)
else:
browse_id = None
result = await player.async_browse(
MEDIA_TYPE_TO_SQUEEZEBOX[search_type],
limit=BROWSE_LIMIT,
browse_id=browse_id,
)
children = None
if result is not None and result.get("items"):
item_type = CONTENT_TYPE_TO_CHILD_TYPE[search_type]
child_media_class = CONTENT_TYPE_MEDIA_CLASS[item_type]
children = []
for item in result["items"]:
children.append(
BrowseMedia(
title=item["title"],
media_class=child_media_class["item"],
media_content_id=str(item["id"]),
media_content_type=item_type,
can_play=True,
can_expand=child_media_class["children"] is not None,
thumbnail=item.get("image_url"),
)
)
if children is None:
raise BrowseError(f"Media not found: {search_type} / {search_id}")
return BrowseMedia(
title=result.get("title"),
media_class=media_class["item"],
children_media_class=media_class["children"],
media_content_id=search_id,
media_content_type=search_type,
can_play=True,
children=children,
can_expand=True,
)
async def library_payload(player):
"""Create response payload to describe contents of library."""
library_info = {
"title": "Music Library",
"media_class": MEDIA_CLASS_DIRECTORY,
"media_content_id": "library",
"media_content_type": "library",
"can_play": False,
"can_expand": True,
"children": [],
}
for item in LIBRARY:
media_class = CONTENT_TYPE_MEDIA_CLASS[item]
result = await player.async_browse(
MEDIA_TYPE_TO_SQUEEZEBOX[item],
limit=1,
)
if result is not None and result.get("items") is not None:
library_info["children"].append(
BrowseMedia(
title=item,
media_class=media_class["children"],
media_content_id=item,
media_content_type=item,
can_play=True,
can_expand=True,
)
)
response = BrowseMedia(**library_info)
return response
async def generate_playlist(player, payload):
"""Generate playlist from browsing payload."""
media_type = payload["search_type"]
media_id = payload["search_id"]
if media_type not in SQUEEZEBOX_ID_BY_TYPE:
return None
browse_id = (SQUEEZEBOX_ID_BY_TYPE[media_type], media_id)
result = await player.async_browse(
"titles", limit=BROWSE_LIMIT, browse_id=browse_id
)
return result.get("items")
|
from abc import ABC, abstractmethod
from homeassistant.core import callback
from .state_report import async_enable_proactive_mode
class AbstractConfig(ABC):
"""Hold the configuration for Alexa."""
_unsub_proactive_report = None
def __init__(self, hass):
"""Initialize abstract config."""
self.hass = hass
@property
def supports_auth(self):
"""Return if config supports auth."""
return False
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return False
@property
def endpoint(self):
"""Endpoint for report state."""
return None
@property
@abstractmethod
def locale(self):
"""Return config locale."""
@property
def entity_config(self):
"""Return entity config."""
return {}
@property
def is_reporting_states(self):
"""Return if proactive mode is enabled."""
return self._unsub_proactive_report is not None
async def async_enable_proactive_mode(self):
"""Enable proactive mode."""
if self._unsub_proactive_report is None:
self._unsub_proactive_report = self.hass.async_create_task(
async_enable_proactive_mode(self.hass, self)
)
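        # If enabling fails, drop the stored task so proactive mode can be retried.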
try:
await self._unsub_proactive_report
except Exception:
self._unsub_proactive_report = None
raise
async def async_disable_proactive_mode(self):
"""Disable proactive mode."""
unsub_func = await self._unsub_proactive_report
if unsub_func:
unsub_func()
self._unsub_proactive_report = None
@callback
def should_expose(self, entity_id):
"""If an entity should be exposed."""
# pylint: disable=no-self-use
return False
@callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
raise NotImplementedError
async def async_get_access_token(self):
"""Get an access token."""
raise NotImplementedError
async def async_accept_grant(self, code):
"""Accept a grant."""
raise NotImplementedError
|
from flask import Flask, jsonify
from flasgger import Swagger, swag_from
app = Flask(__name__)
swagger_config_partial = {
"specs": [
{
"endpoint": "swagger",
"route": "/characteristics/swagger.json",
"rule_filter": lambda rule: True, # all in
"model_filter": lambda tag: True, # all in
}
],
"title": "Product Characteristics APIs",
"version": '',
"termsOfService": "",
"static_url_path": "/characteristics/static",
"specs_route": "/characteristics/swagger/",
"description": "",
"securityDefinitions": {
"oAuthSample": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://api.pgsmartshopassistant.com/o/token/",
}
}
}
colors_spec = {
"tags": [
"colors"
],
"parameters": [
{
"name": "palette",
"in": "path",
"type": "string",
"enum": [
"all",
"rgb",
"cmyk"
],
"required": True,
"default": "all",
"description": "Which palette to filter?"
}
],
"operationId": "get_colors",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"security": {
"colors_oauth": {
"$ref": "#/securityDefinitions/oAuthSample"
}
},
"schemes": [
"http",
"https"
],
"externalDocs": {
"description": "Project repository",
"url": "http://github.com/rochacbruno/flasgger"
},
"definitions": {
"Palette": {
"type": "object",
"properties": {
"palette_name": {
"type": "array",
"items": {
"$ref": "#/definitions/Color"
}
}
}
},
"Color": {
"type": "string"
}
},
"responses": {
"200": {
"description": "A list of colors (may be filtered by palette)",
"schema": {
"$ref": "#/definitions/Palette"
},
"examples": {
"rgb": [
"red",
"green",
"blue"
]
}
}
}
}
@app.route('/colors/<palette>/')
@swag_from(colors_spec)
def colors(palette):
"""
Example using a dictionary as specification
This is the description
You can also set 'summary' and 'description' in
specs_dict
---
# values here overrides the specs dict
"""
all_colors = {
'cmyk': ['cian', 'magenta', 'yellow', 'black'],
'rgb': ['red', 'green', 'blue']
}
if palette == 'all':
result = all_colors
else:
result = {palette: all_colors.get(palette)}
return jsonify(result)
swag = Swagger(app, config=swagger_config_partial, merge=True)
def test_swag(client, specs_data):
"""
    This test runs automatically in Travis CI
:param client: Flask app test client
:param specs_data: {'url': {swag_specs}} for every spec in app
"""
for spec in specs_data.values():
assert 'securityDefinitions' in spec
assert 'oAuthSample' in spec['securityDefinitions']
if __name__ == '__main__':
app.run(debug=True)
|
import asyncio
from datetime import timedelta
import logging
import pytest
from samsungctl import exceptions
from samsungtvws.exceptions import ConnectionFailure
from websocket import WebSocketException
from homeassistant.components.media_player import DEVICE_CLASS_TV
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_URL,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SUPPORT_TURN_ON,
)
from homeassistant.components.samsungtv.const import (
CONF_ON_ACTION,
DOMAIN as SAMSUNGTV_DOMAIN,
)
from homeassistant.components.samsungtv.media_player import SUPPORT_SAMSUNGTV
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
CONF_HOST,
CONF_IP_ADDRESS,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TOKEN,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import DEFAULT as DEFAULT_MOCK, Mock, PropertyMock, call, patch
from tests.common import MockConfigEntry, async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake"
MOCK_CONFIG = {
SAMSUNGTV_DOMAIN: [
{
CONF_HOST: "fake",
CONF_NAME: "fake",
CONF_PORT: 55000,
CONF_ON_ACTION: [{"delay": "00:00:01"}],
}
]
}
MOCK_CONFIGWS = {
SAMSUNGTV_DOMAIN: [
{
CONF_HOST: "fake",
CONF_NAME: "fake",
CONF_PORT: 8001,
CONF_TOKEN: "123456789",
CONF_ON_ACTION: [{"delay": "00:00:01"}],
}
]
}
MOCK_CALLS_WS = {
"host": "fake",
"port": 8001,
"token": None,
"timeout": 31,
"name": "HomeAssistant",
}
MOCK_ENTRY_WS = {
CONF_IP_ADDRESS: "test",
CONF_HOST: "fake",
CONF_METHOD: "websocket",
CONF_NAME: "fake",
CONF_PORT: 8001,
CONF_TOKEN: "abcde",
}
MOCK_CALLS_ENTRY_WS = {
"host": "fake",
"name": "HomeAssistant",
"port": 8001,
"timeout": 8,
"token": "abcde",
}
ENTITY_ID_NOTURNON = f"{DOMAIN}.fake_noturnon"
MOCK_CONFIG_NOTURNON = {
SAMSUNGTV_DOMAIN: [
{CONF_HOST: "fake_noturnon", CONF_NAME: "fake_noturnon", CONF_PORT: 55000}
]
}
@pytest.fixture(name="remote")
def remote_fixture():
"""Patch the samsungctl Remote."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote"
) as remote_class, patch(
"homeassistant.components.samsungtv.config_flow.socket"
) as socket1, patch(
"homeassistant.components.samsungtv.socket"
) as socket2:
remote = Mock()
remote.__enter__ = Mock()
remote.__exit__ = Mock()
remote_class.return_value = remote
socket1.gethostbyname.return_value = "FAKE_IP_ADDRESS"
socket2.gethostbyname.return_value = "FAKE_IP_ADDRESS"
yield remote
@pytest.fixture(name="remotews")
def remotews_fixture():
"""Patch the samsungtvws SamsungTVWS."""
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS"
) as remote_class, patch(
"homeassistant.components.samsungtv.config_flow.socket"
) as socket1, patch(
"homeassistant.components.samsungtv.socket"
) as socket2:
remote = Mock()
remote.__enter__ = Mock()
remote.__exit__ = Mock()
remote_class.return_value = remote
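        # Expose a fake token on the mocked context manager for code that reads remote.token.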
remote_class().__enter__().token = "FAKE_TOKEN"
socket1.gethostbyname.return_value = "FAKE_IP_ADDRESS"
socket2.gethostbyname.return_value = "FAKE_IP_ADDRESS"
yield remote
@pytest.fixture(name="delay")
def delay_fixture():
"""Patch the delay script function."""
with patch(
"homeassistant.components.samsungtv.media_player.Script.async_run"
) as delay:
yield delay
@pytest.fixture
def mock_now():
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def setup_samsungtv(hass, config):
"""Set up mock Samsung TV."""
await async_setup_component(hass, SAMSUNGTV_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_with_turnon(hass, remote):
"""Test setup of platform."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert hass.states.get(ENTITY_ID)
async def test_setup_without_turnon(hass, remote):
"""Test setup of platform."""
await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
assert hass.states.get(ENTITY_ID_NOTURNON)
async def test_setup_websocket(hass, remotews, mock_now):
"""Test setup of platform."""
with patch("homeassistant.components.samsungtv.bridge.SamsungTVWS") as remote_class:
enter = Mock()
type(enter).token = PropertyMock(return_value="987654321")
remote = Mock()
remote.__enter__ = Mock(return_value=enter)
remote.__exit__ = Mock()
remote_class.return_value = remote
await setup_samsungtv(hass, MOCK_CONFIGWS)
assert remote_class.call_count == 1
assert remote_class.call_args_list == [call(**MOCK_CALLS_WS)]
assert hass.states.get(ENTITY_ID)
async def test_setup_websocket_2(hass, mock_now):
"""Test setup of platform from config entry."""
entity_id = f"{DOMAIN}.fake"
entry = MockConfigEntry(
domain=SAMSUNGTV_DOMAIN,
data=MOCK_ENTRY_WS,
unique_id=entity_id,
)
entry.add_to_hass(hass)
config_entries = hass.config_entries.async_entries(SAMSUNGTV_DOMAIN)
assert len(config_entries) == 1
assert entry is config_entries[0]
assert await async_setup_component(hass, SAMSUNGTV_DOMAIN, {})
await hass.async_block_till_done()
next_update = mock_now + timedelta(minutes=5)
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS"
) as remote, patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert remote.call_count == 1
assert remote.call_args_list == [call(**MOCK_CALLS_ENTRY_WS)]
async def test_update_on(hass, remote, mock_now):
"""Testing update tv on."""
await setup_samsungtv(hass, MOCK_CONFIG)
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_update_off(hass, remote, mock_now):
"""Testing update tv off."""
await setup_samsungtv(hass, MOCK_CONFIG)
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[OSError("Boom"), DEFAULT_MOCK],
):
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_OFF
async def test_update_access_denied(hass, remote, mock_now):
"""Testing update tv access denied exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=exceptions.AccessDenied("Boom"),
):
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert [
flow
for flow in hass.config_entries.flow.async_progress()
if flow["context"]["source"] == "reauth"
]
async def test_update_connection_failure(hass, remotews, mock_now):
"""Testing update tv connection failure exception."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[OSError("Boom"), DEFAULT_MOCK],
):
await setup_samsungtv(hass, MOCK_CONFIGWS)
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=ConnectionFailure("Boom"),
):
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert [
flow
for flow in hass.config_entries.flow.async_progress()
if flow["context"]["source"] == "reauth"
]
async def test_update_unhandled_response(hass, remote, mock_now):
"""Testing update tv unhandled response exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[exceptions.UnhandledResponse("Boom"), DEFAULT_MOCK],
):
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_send_key(hass, remote):
"""Test for send key."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_VOLUP")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
assert state.state == STATE_ON
async def test_send_key_broken_pipe(hass, remote):
"""Testing broken pipe Exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
remote.control = Mock(side_effect=BrokenPipeError("Boom"))
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_send_key_connection_closed_retry_succeed(hass, remote):
"""Test retry on connection closed."""
await setup_samsungtv(hass, MOCK_CONFIG)
remote.control = Mock(
side_effect=[exceptions.ConnectionClosed("Boom"), DEFAULT_MOCK, DEFAULT_MOCK]
)
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
    # key called twice because of the retry, plus update called
assert remote.control.call_count == 2
assert remote.control.call_args_list == [
call("KEY_VOLUP"),
call("KEY_VOLUP"),
]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
assert state.state == STATE_ON
async def test_send_key_unhandled_response(hass, remote):
"""Testing unhandled response exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
remote.control = Mock(side_effect=exceptions.UnhandledResponse("Boom"))
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_send_key_websocketexception(hass, remote):
"""Testing unhandled response exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
remote.control = Mock(side_effect=WebSocketException("Boom"))
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_send_key_os_error(hass, remote):
"""Testing broken pipe Exception."""
await setup_samsungtv(hass, MOCK_CONFIG)
remote.control = Mock(side_effect=OSError("Boom"))
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
async def test_name(hass, remote):
"""Test for name property."""
await setup_samsungtv(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake"
async def test_state_with_turnon(hass, remote, delay):
"""Test for state property."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_ON
assert delay.call_count == 1
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_OFF
async def test_state_without_turnon(hass, remote):
"""Test for state property."""
await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
)
state = hass.states.get(ENTITY_ID_NOTURNON)
assert state.state == STATE_ON
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
)
state = hass.states.get(ENTITY_ID_NOTURNON)
assert state.state == STATE_OFF
async def test_supported_features_with_turnon(hass, remote):
"""Test for supported_features property."""
await setup_samsungtv(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert (
state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
)
async def test_supported_features_without_turnon(hass, remote):
"""Test for supported_features property."""
await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
state = hass.states.get(ENTITY_ID_NOTURNON)
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_SAMSUNGTV
async def test_device_class(hass, remote):
"""Test for device_class property."""
await setup_samsungtv(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_TV
async def test_turn_off_websocket(hass, remotews):
"""Test for turn_off."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[OSError("Boom"), DEFAULT_MOCK],
):
await setup_samsungtv(hass, MOCK_CONFIGWS)
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key called
assert remotews.send_key.call_count == 1
assert remotews.send_key.call_args_list == [call("KEY_POWER")]
async def test_turn_off_legacy(hass, remote):
"""Test for turn_off."""
await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
)
# key called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_POWEROFF")]
async def test_turn_off_os_error(hass, remote, caplog):
"""Test for turn_off with OSError."""
caplog.set_level(logging.DEBUG)
await setup_samsungtv(hass, MOCK_CONFIG)
remote.close = Mock(side_effect=OSError("BOOM"))
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
assert "Could not establish connection" in caplog.text
async def test_volume_up(hass, remote):
"""Test for volume_up."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_VOLUP")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_volume_down(hass, remote):
"""Test for volume_down."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_DOWN, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_VOLDOWN")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_mute_volume(hass, remote):
"""Test for mute_volume."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_VOLUME_MUTE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_MEDIA_VOLUME_MUTED: True},
True,
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_MUTE")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_media_play(hass, remote):
"""Test for media_play."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_MEDIA_PLAY, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_PLAY")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_media_pause(hass, remote):
"""Test for media_pause."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_MEDIA_PAUSE, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_PAUSE")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_media_next_track(hass, remote):
"""Test for media_next_track."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_MEDIA_NEXT_TRACK, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_CHUP")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_media_previous_track(hass, remote):
"""Test for media_previous_track."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_CHDOWN")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_turn_on_with_turnon(hass, remote, delay):
"""Test turn on."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
assert delay.call_count == 1
async def test_turn_on_without_turnon(hass, remote):
"""Test turn on."""
await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
)
    # nothing called as turn_on is not a supported feature
assert remote.control.call_count == 0
async def test_play_media(hass, remote):
"""Test for play_media."""
asyncio_sleep = asyncio.sleep
sleeps = []
async def sleep(duration, loop):
sleeps.append(duration)
await asyncio_sleep(0, loop=loop)
await setup_samsungtv(hass, MOCK_CONFIG)
with patch("asyncio.sleep", new=sleep):
assert await hass.services.async_call(
DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
ATTR_MEDIA_CONTENT_ID: "576",
},
True,
)
# keys and update called
assert remote.control.call_count == 4
assert remote.control.call_args_list == [
call("KEY_5"),
call("KEY_7"),
call("KEY_6"),
call("KEY_ENTER"),
]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
assert len(sleeps) == 3
async def test_play_media_invalid_type(hass, remote):
"""Test for play_media with invalid media type."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
url = "https://example.com"
await setup_samsungtv(hass, MOCK_CONFIG)
remote.reset_mock()
assert await hass.services.async_call(
DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_URL,
ATTR_MEDIA_CONTENT_ID: url,
},
True,
)
# only update called
assert remote.control.call_count == 0
assert remote.close.call_count == 0
assert remote.call_count == 1
async def test_play_media_channel_as_string(hass, remote):
"""Test for play_media with invalid channel as string."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
url = "https://example.com"
await setup_samsungtv(hass, MOCK_CONFIG)
remote.reset_mock()
assert await hass.services.async_call(
DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
ATTR_MEDIA_CONTENT_ID: url,
},
True,
)
# only update called
assert remote.control.call_count == 0
assert remote.close.call_count == 0
assert remote.call_count == 1
async def test_play_media_channel_as_non_positive(hass, remote):
"""Test for play_media with invalid channel as non positive integer."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
await setup_samsungtv(hass, MOCK_CONFIG)
remote.reset_mock()
assert await hass.services.async_call(
DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
ATTR_MEDIA_CONTENT_ID: "-4",
},
True,
)
# only update called
assert remote.control.call_count == 0
assert remote.close.call_count == 0
assert remote.call_count == 1
async def test_select_source(hass, remote):
"""Test for select_source."""
await setup_samsungtv(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "HDMI"},
True,
)
# key and update called
assert remote.control.call_count == 1
assert remote.control.call_args_list == [call("KEY_HDMI")]
assert remote.close.call_count == 1
assert remote.close.call_args_list == [call()]
async def test_select_source_invalid_source(hass, remote):
"""Test for select_source with invalid source."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
await setup_samsungtv(hass, MOCK_CONFIG)
remote.reset_mock()
assert await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "INVALID"},
True,
)
# only update called
assert remote.control.call_count == 0
assert remote.close.call_count == 0
assert remote.call_count == 1
|
from collections import namedtuple
from datetime import datetime as dt
import pandas as pd
from arctic.date._mktz import mktz
from arctic.multi_index import groupby_asof
BitemporalItem = namedtuple('BitemporalItem', 'symbol, library, data, metadata, last_updated')
class BitemporalStore(object):
""" A versioned pandas DataFrame store.
As the name hinted, this holds versions of DataFrame by maintaining an extra 'insert time' index internally.
"""
def __init__(self, version_store, observe_column='observed_dt'):
"""
Parameters
----------
version_store : `VersionStore`
The version store that keeps the underlying data frames
observe_column : `str`
Column name for the datetime index that represents the insertion time of a row of data. Unless you intend to
read raw data out, this column is internal to this store.
"""
self._store = version_store
self.observe_column = observe_column
def read(self, symbol, as_of=None, raw=False, **kwargs):
# TODO: shall we block from_version from getting into super.read?
"""Read data for the named symbol. Returns a BitemporalItem object with
a data and metdata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `datetime.datetime`
Return the data as it was as_of the point in time.
raw : `bool`
If True, will return the full bitemporal dataframe (i.e. all versions of the data). This also means as_of is
ignored.
Returns
-------
BitemporalItem namedtuple which contains a .data and .metadata element
"""
item = self._store.read(symbol, **kwargs)
last_updated = max(item.data.index.get_level_values(self.observe_column))
if raw:
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(), data=item.data,
metadata=item.metadata,
last_updated=last_updated)
else:
index_names = list(item.data.index.names)
index_names.remove(self.observe_column)
return BitemporalItem(symbol=symbol, library=self._store._arctic_lib.get_name(),
data=groupby_asof(item.data, as_of=as_of, dt_col=index_names,
asof_col=self.observe_column),
metadata=item.metadata, last_updated=last_updated)
def update(self, symbol, data, metadata=None, upsert=True, as_of=None, **kwargs):
""" Append 'data' under the specified 'symbol' name to this library.
Parameters
----------
symbol : `str`
symbol name for the item
data : `pd.DataFrame`
to be persisted
metadata : `dict`
            An optional dictionary of metadata to persist along with the symbol. If None and there is existing
            metadata, the current metadata will be maintained.
upsert : `bool`
Write 'data' if no previous version exists.
as_of : `datetime.datetime`
The "insert time". Default to datetime.now()
"""
local_tz = mktz()
if not as_of:
as_of = dt.now()
if as_of.tzinfo is None:
as_of = as_of.replace(tzinfo=local_tz)
data = self._add_observe_dt_index(data, as_of)
if upsert and not self._store.has_symbol(symbol):
df = data
else:
existing_item = self._store.read(symbol, **kwargs)
if metadata is None:
metadata = existing_item.metadata
df = existing_item.data.append(data).sort_index(kind='mergesort')
self._store.write(symbol, df, metadata=metadata, prune_previous_version=True)
def write(self, *args, **kwargs):
# TODO: may be diff + append?
        raise NotImplementedError('Direct write for BitemporalStore is not supported. Use update instead '
                                  'to add / modify timeseries.')
def _add_observe_dt_index(self, df, as_of):
index_names = list(df.index.names)
index_names.append(self.observe_column)
index = [x + (as_of,) if df.index.nlevels > 1 else (x, as_of) for x in df.index.tolist()]
df = df.set_index(pd.MultiIndex.from_tuples(index, names=index_names), inplace=False)
return df
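# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assumes a running MongoDB on localhost and the `arctic` package; the host,
# library and symbol names below are placeholders chosen for the example.
if __name__ == '__main__':
    from arctic import Arctic
    arctic = Arctic('localhost')
    if 'example.bitemporal' not in arctic.list_libraries():
        arctic.initialize_library('example.bitemporal')
    store = BitemporalStore(arctic['example.bitemporal'])
    prices = pd.DataFrame({'price': [1.0, 2.0]},
                          index=pd.date_range('2020-01-01', periods=2, name='sample_dt'))
    # First version of the data, observed on 2020-01-10.
    store.update('SYMBOL', prices, as_of=dt(2020, 1, 10, tzinfo=mktz()))
    # A later correction is layered on top, keyed by its own observed_dt.
    store.update('SYMBOL', prices * 1.1, as_of=dt(2020, 2, 1, tzinfo=mktz()))
    # Without as_of the latest view is returned; with as_of the store rewinds
    # to what was known at that insert time (here: the uncorrected prices).
    print(store.read('SYMBOL').data)
    print(store.read('SYMBOL', as_of=dt(2020, 1, 15, tzinfo=mktz())).data)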
|
from mock import patch, call
from arctic.scripts import arctic_list_libraries
from ...util import run_as_main
def test_list_library(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host)
for x in p.call_args_list:
if x == call(library_name):
return
assert False, "Failed to find a library"
def test_list_library_args(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host, library_name[:2])
for x in p.call_args_list:
assert x[0][0].startswith(library_name[:2])
def test_list_library_args_not_found(mongo_host, library, library_name):
with patch('arctic.scripts.arctic_list_libraries.print') as p:
run_as_main(arctic_list_libraries.main, "--host", mongo_host, 'some_library_which_doesnt_exist')
assert p.call_count == 0
|
import os
from openrazer_daemon.hardware.device_base import RazerDevice
# Hack to get a list of hardware modules to import
HARDWARE_MODULES = ['openrazer_daemon.hardware.' + os.path.splitext(hw_file)[0] for hw_file in os.listdir(os.path.dirname(__file__)) if hw_file not in ('device_base.py', '__init__.py') and hw_file.endswith('.py')]
# List of classes to exclude from the class finding
EXCLUDED_CLASSES = ('RazerDevice', 'RazerDeviceBrightnessSuspend')
def get_device_classes():
"""
Get a list of hardware classes
:return: List of RazerDevice subclasses
:rtype: list of callable
"""
classes = []
for hw_module in HARDWARE_MODULES:
imported_module = __import__(hw_module, globals=globals(), locals=locals(), fromlist=['*'], level=0)
for class_name, class_instance in imported_module.__dict__.items():
if class_name in EXCLUDED_CLASSES or class_name.startswith('_') or not isinstance(class_instance, type):
continue
classes.append(class_instance)
return sorted(classes, key=lambda cls: cls.__name__)
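# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Lists the device classes discovered by the import hack above; assumes the
# openrazer_daemon package is installed so the absolute imports resolve.
if __name__ == '__main__':
    for device_class in get_device_classes():
        print(device_class.__name__)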
|
from homeassistant.components.remote import RemoteEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
setup_platform(hass, {}, async_add_entities)
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the demo remotes."""
add_entities_callback(
[
DemoRemote("Remote One", False, None),
DemoRemote("Remote Two", True, "mdi:remote"),
]
)
class DemoRemote(RemoteEntity):
"""Representation of a demo remote."""
def __init__(self, name, state, icon):
"""Initialize the Demo Remote."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._icon = icon
self._last_command_sent = None
@property
def should_poll(self):
"""No polling needed for a demo remote."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def is_on(self):
"""Return true if remote is on."""
return self._state
@property
def device_state_attributes(self):
"""Return device state attributes."""
if self._last_command_sent is not None:
return {"last_command_sent": self._last_command_sent}
def turn_on(self, **kwargs):
"""Turn the remote on."""
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the remote off."""
self._state = False
self.schedule_update_ha_state()
def send_command(self, command, **kwargs):
"""Send a command to a device."""
for com in command:
self._last_command_sent = com
self.schedule_update_ha_state()
|
import inspect
import logging
from typing import Any, Callable, Dict, Optional
def deprecated_substitute(substitute_name: str) -> Callable[..., Callable]:
"""Help migrate properties to new names.
When a property is added to replace an older property, this decorator can
be added to the new property, listing the old property as the substitute.
If the old property is defined, its value will be used instead, and a log
warning will be issued alerting the user of the impending change.
"""
def decorator(func: Callable) -> Callable:
"""Decorate function as deprecated."""
def func_wrapper(self: Callable) -> Any:
"""Wrap for the original function."""
if hasattr(self, substitute_name):
# If this platform is still using the old property, issue
# a logger warning once with instructions on how to fix it.
warnings = getattr(func, "_deprecated_substitute_warnings", {})
module_name = self.__module__
if not warnings.get(module_name):
logger = logging.getLogger(module_name)
logger.warning(
"'%s' is deprecated. Please rename '%s' to "
"'%s' in '%s' to ensure future support.",
substitute_name,
substitute_name,
func.__name__,
inspect.getfile(self.__class__),
)
warnings[module_name] = True
setattr(func, "_deprecated_substitute_warnings", warnings)
# Return the old property
return getattr(self, substitute_name)
return func(self)
return func_wrapper
return decorator
def get_deprecated(
config: Dict[str, Any], new_name: str, old_name: str, default: Optional[Any] = None
) -> Optional[Any]:
"""Allow an old config name to be deprecated with a replacement.
If the new config isn't found, but the old one is, the old value is used
and a warning is issued to the user.
"""
if old_name in config:
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
            # If Python is unable to access the source files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/core/issues/24982
module_name = __name__
logger = logging.getLogger(module_name)
logger.warning(
"'%s' is deprecated. Please rename '%s' to '%s' in your "
"configuration file.",
old_name,
old_name,
new_name,
)
return config.get(old_name)
return config.get(new_name, default)
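# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Self-contained demo of both helpers; the class, property and config keys
# below are made up for the example and are not Home Assistant APIs.
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    class _ExampleEntity:
        """Hypothetical platform class that still defines the old property."""
        @property
        def hidden_attr(self) -> str:
            # Old-style property a platform might still provide.
            return "value from old property"
        @property
        @deprecated_substitute("hidden_attr")
        def visible_attr(self) -> str:
            return "value from new property"
    entity = _ExampleEntity()
    # Logs a deprecation warning once and falls back to the old property.
    print(entity.visible_attr)
    # Prefers 'new_option' but accepts 'old_option' with a warning.
    config = {"old_option": 42}
    print(get_deprecated(config, "new_option", "old_option", default=0))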
|
import asyncio
from functools import partial
import logging
import broadlink as blk
from broadlink.exceptions import (
AuthenticationError,
AuthorizationError,
BroadlinkException,
ConnectionClosedError,
NetworkTimeoutError,
)
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME, CONF_TIMEOUT, CONF_TYPE
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from .const import DEFAULT_PORT, DOMAIN, DOMAINS_AND_TYPES
from .updater import get_update_manager
_LOGGER = logging.getLogger(__name__)
def get_domains(device_type):
"""Return the domains available for a device type."""
return {domain for domain, types in DOMAINS_AND_TYPES if device_type in types}
class BroadlinkDevice:
"""Manages a Broadlink device."""
def __init__(self, hass, config):
"""Initialize the device."""
self.hass = hass
self.config = config
self.api = None
self.update_manager = None
self.fw_version = None
self.authorized = None
self.reset_jobs = []
@property
def name(self):
"""Return the name of the device."""
return self.config.title
@property
def unique_id(self):
"""Return the unique id of the device."""
return self.config.unique_id
@staticmethod
async def async_update(hass, entry):
"""Update the device and related entities.
Triggered when the device is renamed on the frontend.
"""
device_registry = await dr.async_get_registry(hass)
device_entry = device_registry.async_get_device(
{(DOMAIN, entry.unique_id)}, set()
)
device_registry.async_update_device(device_entry.id, name=entry.title)
await hass.config_entries.async_reload(entry.entry_id)
async def async_setup(self):
"""Set up the device and related entities."""
config = self.config
api = blk.gendevice(
config.data[CONF_TYPE],
(config.data[CONF_HOST], DEFAULT_PORT),
bytes.fromhex(config.data[CONF_MAC]),
name=config.title,
)
api.timeout = config.data[CONF_TIMEOUT]
try:
await self.hass.async_add_executor_job(api.auth)
except AuthenticationError:
await self._async_handle_auth_error()
return False
except (NetworkTimeoutError, OSError) as err:
raise ConfigEntryNotReady from err
except BroadlinkException as err:
_LOGGER.error(
"Failed to authenticate to the device at %s: %s", api.host[0], err
)
return False
self.api = api
self.authorized = True
update_manager = get_update_manager(self)
coordinator = update_manager.coordinator
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady()
self.update_manager = update_manager
self.hass.data[DOMAIN].devices[config.entry_id] = self
self.reset_jobs.append(config.add_update_listener(self.async_update))
try:
self.fw_version = await self.hass.async_add_executor_job(api.get_fwversion)
except (BroadlinkException, OSError):
pass
# Forward entry setup to related domains.
tasks = (
self.hass.config_entries.async_forward_entry_setup(config, domain)
for domain in get_domains(self.api.type)
)
for entry_setup in tasks:
self.hass.async_create_task(entry_setup)
return True
async def async_unload(self):
"""Unload the device and related entities."""
if self.update_manager is None:
return True
while self.reset_jobs:
self.reset_jobs.pop()()
tasks = (
self.hass.config_entries.async_forward_entry_unload(self.config, domain)
for domain in get_domains(self.api.type)
)
results = await asyncio.gather(*tasks)
return all(results)
async def async_auth(self):
"""Authenticate to the device."""
try:
await self.hass.async_add_executor_job(self.api.auth)
except (BroadlinkException, OSError) as err:
_LOGGER.debug(
"Failed to authenticate to the device at %s: %s", self.api.host[0], err
)
if isinstance(err, AuthenticationError):
await self._async_handle_auth_error()
return False
return True
async def async_request(self, function, *args, **kwargs):
"""Send a request to the device."""
request = partial(function, *args, **kwargs)
try:
return await self.hass.async_add_executor_job(request)
except (AuthorizationError, ConnectionClosedError):
if not await self.async_auth():
raise
return await self.hass.async_add_executor_job(request)
async def _async_handle_auth_error(self):
"""Handle an authentication error."""
if self.authorized is False:
return
self.authorized = False
_LOGGER.error(
"The device at %s is locked for authentication. Follow the configuration flow to unlock it",
self.config.data[CONF_HOST],
)
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "reauth"},
data={CONF_NAME: self.name, **self.config.data},
)
)
|
import argparse
import json
import logging
import os
import sys
from typing import Mapping
from typing import Sequence
from kubernetes.client.rest import ApiException
from paasta_tools.kubernetes_tools import create_kubernetes_secret_signature
from paasta_tools.kubernetes_tools import create_secret
from paasta_tools.kubernetes_tools import get_kubernetes_secret_signature
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import update_kubernetes_secret_signature
from paasta_tools.kubernetes_tools import update_secret
from paasta_tools.secret_tools import get_secret_provider
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Sync paasta secrets into k8s")
parser.add_argument(
"service_list",
nargs="+",
help="The list of services to sync secrets for",
metavar="SERVICE",
)
parser.add_argument(
"-c",
"--cluster",
dest="cluster",
metavar="CLUSTER",
default=None,
help="Kubernetes cluster name",
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
args = parser.parse_args()
return args
def main() -> None:
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
if args.cluster:
cluster = args.cluster
else:
cluster = system_paasta_config.get_cluster()
secret_provider_name = system_paasta_config.get_secret_provider_name()
vault_cluster_config = system_paasta_config.get_vault_cluster_config()
kube_client = KubeClient()
    success = sync_all_secrets(
        kube_client=kube_client,
        cluster=cluster,
        service_list=args.service_list,
        secret_provider_name=secret_provider_name,
        vault_cluster_config=vault_cluster_config,
        soa_dir=args.soa_dir,
    )
    sys.exit(0 if success else 1)
def sync_all_secrets(
kube_client: KubeClient,
cluster: str,
service_list: Sequence[str],
secret_provider_name: str,
vault_cluster_config: Mapping[str, str],
soa_dir: str,
) -> bool:
results = []
for service in service_list:
results.append(
sync_secrets(
kube_client=kube_client,
cluster=cluster,
service=service,
secret_provider_name=secret_provider_name,
vault_cluster_config=vault_cluster_config,
soa_dir=soa_dir,
)
)
return all(results)
def sync_secrets(
kube_client: KubeClient,
cluster: str,
service: str,
secret_provider_name: str,
vault_cluster_config: Mapping[str, str],
soa_dir: str,
) -> bool:
secret_dir = os.path.join(soa_dir, service, "secrets")
secret_provider_kwargs = {
"vault_cluster_config": vault_cluster_config,
# TODO: make vault-tools support k8s auth method so we don't have to
# mount a token in.
"vault_auth_method": "token",
"vault_token_file": "/root/.vault_token",
}
secret_provider = get_secret_provider(
secret_provider_name=secret_provider_name,
soa_dir=soa_dir,
service_name=service,
cluster_names=[cluster],
secret_provider_kwargs=secret_provider_kwargs,
)
if not os.path.isdir(secret_dir):
log.debug(f"No secrets dir for {service}")
return True
with os.scandir(secret_dir) as secret_file_paths:
for secret_file_path in secret_file_paths:
if secret_file_path.path.endswith("json"):
secret = secret_file_path.name.replace(".json", "")
with open(secret_file_path, "r") as secret_file:
secret_data = json.load(secret_file)
secret_signature = secret_provider.get_secret_signature_from_data(
secret_data
)
if secret_signature:
kubernetes_secret_signature = get_kubernetes_secret_signature(
kube_client=kube_client, secret=secret, service=service
)
if not kubernetes_secret_signature:
log.info(f"{secret} for {service} not found, creating")
try:
create_secret(
kube_client=kube_client,
secret=secret,
service=service,
secret_provider=secret_provider,
)
except ApiException as e:
if e.status == 409:
log.warning(
f"Secret {secret} for {service} already exists"
)
else:
raise
create_kubernetes_secret_signature(
kube_client=kube_client,
secret=secret,
service=service,
secret_signature=secret_signature,
)
elif secret_signature != kubernetes_secret_signature:
log.info(
f"{secret} for {service} needs updating as signature changed"
)
update_secret(
kube_client=kube_client,
secret=secret,
service=service,
secret_provider=secret_provider,
)
update_kubernetes_secret_signature(
kube_client=kube_client,
secret=secret,
service=service,
secret_signature=secret_signature,
)
else:
log.info(f"{secret} for {service} up to date")
return True
if __name__ == "__main__":
main()
|
import logging
from unittest import skipIf
from stash.tests.stashtest import StashTestCase
try:
from stash.system.shui.tkui import ShTerminal
except ImportError:
ShTerminal = None
class NoInitTkTerminal(ShTerminal):
"""
Subclass of ShTerminal which does not initiate the superclass
"""
def __init__(self, text=u""):
self._text = ""
self.text = text
self.logger = logging.getLogger('StaSh.Terminal')
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
@skipIf(ShTerminal is None, "No Tk-GUI available")
class TkTerminalTests(StashTestCase):
"""
Tests for stash.system.shui.tkui.ShTerminal
"""
tc = NoInitTkTerminal
def test_tk_index_conversion(self):
"""
Test conversion to and from a tk index to a tuple
"""
values = { # tk index -> expected
"1.0": (0, 0),
"1.1": (0, 1),
"2.0": (1, 0),
"2.2": (1, 2),
"10.11": (9, 11),
"9.2": (8, 2),
}
terminal = self.tc()
for tki in values:
expected = values[tki]
converted = terminal._tk_index_to_tuple(tki)
self.assertEqual(converted, expected)
# convert back
back = terminal._tuple_to_tk_index(converted)
self.assertEqual(back, tki)
def test_abs_rel_conversion_1(self):
"""
First test for conversion of absolute and relative indexes
"""
s = """0123
567
9
"""
values = { # rel -> abs
0: (0, 0),
1: (0, 1),
2: (0, 2),
3: (0, 3),
4: (0, 4),
5: (1, 0),
6: (1, 1),
7: (1, 2),
8: (1, 3),
9: (2, 0),
10: (2, 1),
}
terminal = self.tc(s)
for rel in values:
expected = values[rel]
ab = terminal._rel_cursor_pos_to_abs_pos(rel)
self.assertEqual(ab, expected)
# convert back
back = terminal._abs_cursor_pos_to_rel_pos(ab)
self.assertEqual(back, rel)
|
from time import perf_counter
from flexx import app, event
def find_prime(n):
primes = []
def isprime(x):
if x <= 1:
return False
elif x == 2:
return True
for i in range(2, x//2+1):
if x % i == 0:
return False
return True
t0 = perf_counter()
i = 0
while len(primes) < n:
i += 1
if isprime(i):
primes.append(i)
t1 = perf_counter()
print(i, 'found in ', t1-t0, 'seconds')
class PrimeFinder(app.PyComponent):
def init(self):
self.js = PrimeFinderJs()
@event.action
def find_prime_py(self, n):
find_prime(n)
@event.action
def find_prime_js(self, n):
self.js.find_prime_js(n)
class PrimeFinderJs(app.JsComponent):
@event.action
def find_prime_js(self, n):
find_prime(n)
if __name__ == '__main__':
# Create app instance
finder = app.launch(PrimeFinder, 'app or chrome-app')
finder.find_prime_py(2000) # 0.6-0.7 s
finder.find_prime_js(2000) # 0.1-0.2 s
app.run()
|
from __future__ import absolute_import
from __future__ import unicode_literals
import getpass
import logging
import verboselogs
from ..looters import InstaLooter
logger = verboselogs.VerboseLogger(__name__)
def login(args):
if args['--username']:
username = args['--username']
if not InstaLooter._logged_in():
password = args['--password'] or getpass.getpass()
InstaLooter._login(username, password)
if not args['--quiet']:
logger.success('Logged in.')
elif not args['--quiet']:
logger.success("Already logged in.")
|
import os
from select import select
from subprocess import PIPE
import sys
import time
from itertools import chain
from plumbum.commands.processes import run_proc, ProcessExecutionError
from plumbum.commands.processes import BY_TYPE
import plumbum.commands.base
from plumbum.lib import read_fd_decode_safely
class Future(object):
"""Represents a "future result" of a running process. It basically wraps a ``Popen``
object and the expected exit code, and provides poll(), wait(), returncode, stdout,
and stderr.
"""
def __init__(self, proc, expected_retcode, timeout=None):
self.proc = proc
self._expected_retcode = expected_retcode
self._timeout = timeout
self._returncode = None
self._stdout = None
self._stderr = None
def __repr__(self):
return "<Future %r (%s)>" % (
self.proc.argv,
self._returncode if self.ready() else "running",
)
def poll(self):
"""Polls the underlying process for termination; returns ``False`` if still running,
or ``True`` if terminated"""
if self.proc.poll() is not None:
self.wait()
return self._returncode is not None
ready = poll
def wait(self):
"""Waits for the process to terminate; will raise a
:class:`plumbum.commands.ProcessExecutionError` in case of failure"""
if self._returncode is not None:
return
self._returncode, self._stdout, self._stderr = run_proc(
self.proc, self._expected_retcode, self._timeout)
@property
def stdout(self):
"""The process' stdout; accessing this property will wait for the process to finish"""
self.wait()
return self._stdout
@property
def stderr(self):
"""The process' stderr; accessing this property will wait for the process to finish"""
self.wait()
return self._stderr
@property
def returncode(self):
"""The process' returncode; accessing this property will wait for the process to finish"""
self.wait()
return self._returncode
#===================================================================================================
# execution modifiers
#===================================================================================================
class ExecutionModifier(object):
__slots__ = ("__weakref__", )
def __repr__(self):
"""Automatically creates a representation for given subclass with slots.
Ignore hidden properties."""
slots = {}
for cls in self.__class__.__mro__:
slots_list = getattr(cls, "__slots__", ())
if isinstance(slots_list, str):
slots_list = (slots_list, )
for prop in slots_list:
if prop[0] != '_':
slots[prop] = getattr(self, prop)
mystrs = ("{0} = {1}".format(name, slots[name]) for name in slots)
return "{0}({1})".format(self.__class__.__name__, ", ".join(mystrs))
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
class _BG(ExecutionModifier):
"""
An execution modifier that runs the given command in the background, returning a
:class:`Future <plumbum.commands.Future>` object. In order to mimic shell syntax, it applies
when you right-and it with a command. If you wish to expect a different return code
    (other than the normal success indicated by 0), use ``BG(retcode)``. Example::
future = sleep[5] & BG # a future expecting an exit code of 0
future = sleep[5] & BG(7) # a future expecting an exit code of 7
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
"""
__slots__ = ("retcode", "kargs", "timeout")
def __init__(self, retcode=0, timeout=None, **kargs):
self.retcode = retcode
self.kargs = kargs
self.timeout = timeout
def __rand__(self, cmd):
return Future(
cmd.popen(**self.kargs), self.retcode, timeout=self.timeout)
BG = _BG()
class _FG(ExecutionModifier):
"""
An execution modifier that runs the given command in the foreground, passing it the
current process' stdin, stdout and stderr. Useful for interactive programs that require
a TTY. There is no return value.
In order to mimic shell syntax, it applies when you right-and it with a command.
    If you wish to expect a different return code (other than the normal success indicated by 0),
use ``FG(retcode)``. Example::
vim & FG # run vim in the foreground, expecting an exit code of 0
vim & FG(7) # run vim in the foreground, expecting an exit code of 7
"""
__slots__ = ("retcode", "timeout")
def __init__(self, retcode=0, timeout=None):
self.retcode = retcode
self.timeout = timeout
def __rand__(self, cmd):
cmd(retcode=self.retcode,
stdin=None,
stdout=None,
stderr=None,
timeout=self.timeout)
FG = _FG()
class _TEE(ExecutionModifier):
"""Run a command, dumping its stdout/stderr to the current process's stdout
and stderr, but ALSO return them. Useful for interactive programs that
expect a TTY but also have valuable output.
Use as:
ls["-l"] & TEE
Returns a tuple of (return code, stdout, stderr), just like ``run()``.
"""
__slots__ = ("retcode", "buffered", "timeout")
def __init__(self, retcode=0, buffered=True, timeout=None):
"""`retcode` is the return code to expect to mean "success". Set
`buffered` to False to disable line-buffering the output, which may
cause stdout and stderr to become more entangled than usual.
"""
self.retcode = retcode
self.buffered = buffered
self.timeout = timeout
def __rand__(self, cmd):
with cmd.bgrun(
retcode=self.retcode,
stdin=None,
stdout=PIPE,
stderr=PIPE,
timeout=self.timeout) as p:
outbuf = []
errbuf = []
out = p.stdout
err = p.stderr
buffers = {out: outbuf, err: errbuf}
tee_to = {out: sys.stdout, err: sys.stderr}
done = False
while not done:
# After the process exits, we have to do one more
# round of reading in order to drain any data in the
# pipe buffer. Thus, we check poll() here,
# unconditionally enter the read loop, and only then
# break out of the outer loop if the process has
# exited.
done = (p.poll() is not None)
# We continue this loop until we've done a full
# `select()` call without collecting any input. This
# ensures that our final pass -- after process exit --
# actually drains the pipe buffers, even if it takes
# multiple calls to read().
progress = True
while progress:
progress = False
ready, _, _ = select((out, err), (), ())
for fd in ready:
buf = buffers[fd]
data, text = read_fd_decode_safely(fd, 4096)
if not data: # eof
continue
progress = True
# Python conveniently line-buffers stdout and stderr for
# us, so all we need to do is write to them
# This will automatically add up to three bytes if it cannot be decoded
tee_to[fd].write(text)
# And then "unbuffered" is just flushing after each write
if not self.buffered:
tee_to[fd].flush()
buf.append(data)
stdout = ''.join([x.decode('utf-8') for x in outbuf])
stderr = ''.join([x.decode('utf-8') for x in errbuf])
return p.returncode, stdout, stderr
TEE = _TEE()
class _TF(ExecutionModifier):
"""
An execution modifier that runs the given command, but returns True/False depending on the retcode.
    This returns True if the expected exit code is returned, and False if it is not.
    This is useful for checking true/false bash commands.
    If you wish to expect a different return code (other than the normal success indicated by 0),
    use ``TF(retcode)``. If you want to run the process in the foreground, then use
    ``TF(FG=True)``.
    Example::
        local['touch']['/root/test'] & TF # Returns False, since this cannot be touched
        local['touch']['/root/test'] & TF(1) # Returns True
        local['touch']['/root/test'] & TF(FG=True) # Returns False, will show error message
"""
__slots__ = ("retcode", "FG", "timeout")
def __init__(self, retcode=0, FG=False, timeout=None):
"""`retcode` is the return code to expect to mean "success". Set
`FG` to True to run in the foreground.
"""
self.retcode = retcode
self.FG = FG
self.timeout = timeout
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __rand__(self, cmd):
try:
if self.FG:
cmd(retcode=self.retcode,
stdin=None,
stdout=None,
stderr=None,
timeout=self.timeout)
else:
cmd(retcode=self.retcode, timeout=self.timeout)
return True
except ProcessExecutionError:
return False
TF = _TF()
class _RETCODE(ExecutionModifier):
"""
An execution modifier that runs the given command, causing it to run and return the retcode.
This is useful for working with bash commands that have important retcodes but not very
useful output.
    If you want to run the process in the foreground, then use ``RETCODE(FG=True)``.
Example::
local['touch']['/root/test'] & RETCODE # Returns 1, since this cannot be touched
        local['touch']['/root/test'] & RETCODE(FG=True) # Returns 1, will show error message
"""
__slots__ = ("foreground", "timeout")
def __init__(self, FG=False, timeout=None):
"""`FG` to True to run in the foreground.
"""
self.foreground = FG
self.timeout = timeout
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __rand__(self, cmd):
if self.foreground:
return cmd.run(
retcode=None,
stdin=None,
stdout=None,
stderr=None,
timeout=self.timeout)[0]
else:
return cmd.run(retcode=None, timeout=self.timeout)[0]
RETCODE = _RETCODE()
class _NOHUP(ExecutionModifier):
"""
An execution modifier that runs the given command in the background, disconnected
from the current process, returning a
standard popen object. It will keep running even if you close the current process.
In order to slightly mimic shell syntax, it applies
    when you right-and it with a command. If you wish to use a different working directory
or different stdout, stderr, you can use named arguments. The default is ``NOHUP(
cwd=local.cwd, stdout='nohup.out', stderr=None)``. If stderr is None, stderr will be
sent to stdout. Use ``os.devnull`` for null output. Will respect redirected output.
Example::
sleep[5] & NOHUP # Outputs to nohup.out
sleep[5] & NOHUP(stdout=os.devnull) # No output
    The equivalent bash command would be
.. code-block:: bash
nohup sleep 5 &
"""
__slots__ = ('cwd', 'stdout', 'stderr', 'append')
def __init__(self, cwd='.', stdout='nohup.out', stderr=None, append=True):
""" Set ``cwd``, ``stdout``, or ``stderr``.
Runs as a forked process. You can set ``append=False``, too.
"""
self.cwd = cwd
self.stdout = stdout
self.stderr = stderr
self.append = append
def __rand__(self, cmd):
if isinstance(cmd, plumbum.commands.base.StdoutRedirection):
stdout = cmd.file
append = False
cmd = cmd.cmd
elif isinstance(cmd, plumbum.commands.base.AppendingStdoutRedirection):
stdout = cmd.file
append = True
cmd = cmd.cmd
else:
stdout = self.stdout
append = self.append
return cmd.nohup(self.cwd, stdout, self.stderr, append)
NOHUP = _NOHUP()
class PipeToLoggerMixin():
"""
This mixin allows piping plumbum commands' output into a logger.
The logger must implement a ``log(level, msg)`` method, as in ``logging.Logger``
Example::
class MyLogger(logging.Logger, PipeToLoggerMixin):
pass
logger = MyLogger("example.app")
Here we send the output of an install.sh script into our log::
local['./install.sh'] & logger
We can choose the log-level for each stream::
local['./install.sh'] & logger.pipe(out_level=logging.DEBUG, err_level=logging.DEBUG)
Or use a convenience method for it::
local['./install.sh'] & logger.pipe_debug()
A prefix can be added to each line::
local['./install.sh'] & logger.pipe(prefix="install.sh: ")
If the command fails, an exception is raised as usual. This can be modified::
local['install.sh'] & logger.pipe_debug(retcode=None)
    An exception is also raised if too much time (``DEFAULT_LINE_TIMEOUT``) passes between lines in the stream.
    This can also be modified::
local['install.sh'] & logger.pipe(line_timeout=10)
If we happen to use logbook::
class MyLogger(logbook.Logger, PipeToLoggerMixin):
from logbook import DEBUG, INFO # hook up with logbook's levels
"""
from logging import DEBUG, INFO
DEFAULT_LINE_TIMEOUT = 10 * 60
DEFAULT_STDOUT = "INFO"
DEFAULT_STDERR = "DEBUG"
def pipe(self, out_level=None, err_level=None, prefix=None, line_timeout=None, **kw):
"""
Pipe a command's stdout and stderr lines into this logger.
:param out_level: the log level for lines coming from stdout
:param err_level: the log level for lines coming from stderr
Optionally use `prefix` for each line.
"""
class LogPipe(object):
def __rand__(_, cmd):
popen = cmd if hasattr(cmd, "iter_lines") else cmd.popen()
for typ, lines in popen.iter_lines(line_timeout=line_timeout, mode=BY_TYPE, **kw):
if not lines:
continue
level = levels[typ]
for line in lines.splitlines():
if prefix:
line = "%s: %s" % (prefix, line)
self.log(level, line)
return popen.returncode
levels = {1: getattr(self, self.DEFAULT_STDOUT), 2: getattr(self, self.DEFAULT_STDERR)}
if line_timeout is None:
line_timeout = self.DEFAULT_LINE_TIMEOUT
if out_level is not None:
levels[1] = out_level
if err_level is not None:
levels[2] = err_level
return LogPipe()
def pipe_info(self, prefix=None, **kw):
"""
Pipe a command's stdout and stderr lines into this logger (both at level INFO)
"""
return self.pipe(self.INFO, self.INFO, prefix=prefix, **kw)
def pipe_debug(self, prefix=None, **kw):
"""
Pipe a command's stdout and stderr lines into this logger (both at level DEBUG)
"""
return self.pipe(self.DEBUG, self.DEBUG, prefix=prefix, **kw)
def __rand__(self, cmd):
"""
Pipe a command's stdout and stderr lines into this logger.
Log levels for each stream are determined by ``DEFAULT_STDOUT`` and ``DEFAULT_STDERR``.
"""
return cmd & self.pipe(getattr(self, self.DEFAULT_STDOUT), getattr(self, self.DEFAULT_STDERR))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import arch_ops as ops
from compare_gan.architectures import resnet_ops
import gin
from six.moves import range
import tensorflow as tf
@gin.configurable
class Generator(resnet_ops.ResNetGenerator):
"""ResNet generator, 4 blocks, supporting 32x32 resolution."""
def __init__(self,
hierarchical_z=False,
embed_z=False,
embed_y=False,
**kwargs):
"""Constructor for the ResNet Cifar generator.
Args:
hierarchical_z: Split z into chunks and only give one chunk to each.
Each chunk will also be concatenated to y, the one hot encoded labels.
embed_z: If True use a learnable embedding of z that is used instead.
The embedding will have the length of z.
embed_y: If True use a learnable embedding of y that is used instead.
The embedding will have the length of z (not y!).
      **kwargs: additional arguments passed on to ResNetGenerator.
"""
super(Generator, self).__init__(**kwargs)
self._hierarchical_z = hierarchical_z
self._embed_z = embed_z
self._embed_y = embed_y
def apply(self, z, y, is_training):
"""Build the generator network for the given inputs.
Args:
z: `Tensor` of shape [batch_size, z_dim] with latent code.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
      is_training: boolean, are we in train or eval mode.
Returns:
A tensor of size [batch_size, 32, 32, colors] with values in [0, 1].
"""
assert self._image_shape[0] == 32
assert self._image_shape[1] == 32
num_blocks = 3
z_dim = z.shape[1].value
if self._embed_z:
z = ops.linear(z, z_dim, scope="embed_z", use_sn=self._spectral_norm)
if self._embed_y:
y = ops.linear(y, z_dim, scope="embed_y", use_sn=self._spectral_norm)
y_per_block = num_blocks * [y]
if self._hierarchical_z:
z_per_block = tf.split(z, num_blocks + 1, axis=1)
z0, z_per_block = z_per_block[0], z_per_block[1:]
if y is not None:
y_per_block = [tf.concat([zi, y], 1) for zi in z_per_block]
else:
z0 = z
z_per_block = num_blocks * [z]
output = ops.linear(z0, 4 * 4 * 256, scope="fc_noise",
use_sn=self._spectral_norm)
output = tf.reshape(output, [-1, 4, 4, 256], name="fc_reshaped")
for block_idx in range(3):
block = self._resnet_block(
name="B{}".format(block_idx + 1),
in_channels=256,
out_channels=256,
scale="up")
output = block(
output,
z=z_per_block[block_idx],
y=y_per_block[block_idx],
is_training=is_training)
# Final processing of the output.
output = self.batch_norm(
output, z=z, y=y, is_training=is_training, name="final_norm")
output = tf.nn.relu(output)
output = ops.conv2d(output, output_dim=self._image_shape[2], k_h=3, k_w=3,
d_h=1, d_w=1, name="final_conv",
use_sn=self._spectral_norm,)
return tf.nn.sigmoid(output)
@gin.configurable
class Discriminator(resnet_ops.ResNetDiscriminator):
"""ResNet discriminator, 4 blocks, supporting 32x32 with 1 or 3 colors."""
def __init__(self, project_y=False, **kwargs):
super(Discriminator, self).__init__(**kwargs)
self._project_y = project_y
def apply(self, x, y, is_training):
"""Apply the discriminator on a input.
Args:
x: `Tensor` of shape [batch_size, 32, 32, ?] with real or fake images.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: Boolean, whether the architecture should be constructed for
training or inference.
Returns:
Tuple of 3 Tensors, the final prediction of the discriminator, the logits
      before the final output activation function, and logits from the second
last layer.
"""
resnet_ops.validate_image_inputs(x)
colors = x.shape[3].value
if colors not in [1, 3]:
raise ValueError("Number of color channels not supported: {}".format(
colors))
output = x
for block_idx in range(4):
block = self._resnet_block(
name="B{}".format(block_idx + 1),
in_channels=colors if block_idx == 0 else 128,
out_channels=128,
scale="down" if block_idx <= 1 else "none")
output = block(output, z=None, y=y, is_training=is_training)
# Final part - ReLU
output = tf.nn.relu(output)
h = tf.reduce_mean(output, axis=[1, 2])
out_logit = ops.linear(h, 1, scope="disc_final_fc",
use_sn=self._spectral_norm)
if self._project_y:
if y is None:
raise ValueError("You must provide class information y to project.")
embedded_y = ops.linear(y, 128, use_bias=False,
scope="embedding_fc", use_sn=self._spectral_norm)
out_logit += tf.reduce_sum(embedded_y * h, axis=1, keepdims=True)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, h
|
from coverage import env
if env.WINDOWS:
# Windows implementation
def process_ram():
"""How much RAM is this process using? (Windows)"""
import ctypes
# From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html
class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
"""Used by GetProcessMemoryInfo"""
_fields_ = [
('cb', ctypes.c_ulong),
('PageFaultCount', ctypes.c_ulong),
('PeakWorkingSetSize', ctypes.c_size_t),
('WorkingSetSize', ctypes.c_size_t),
('QuotaPeakPagedPoolUsage', ctypes.c_size_t),
('QuotaPagedPoolUsage', ctypes.c_size_t),
('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t),
('QuotaNonPagedPoolUsage', ctypes.c_size_t),
('PagefileUsage', ctypes.c_size_t),
('PeakPagefileUsage', ctypes.c_size_t),
('PrivateUsage', ctypes.c_size_t),
]
mem_struct = PROCESS_MEMORY_COUNTERS_EX()
ret = ctypes.windll.psapi.GetProcessMemoryInfo(
ctypes.windll.kernel32.GetCurrentProcess(),
ctypes.byref(mem_struct),
ctypes.sizeof(mem_struct)
)
if not ret: # pragma: part covered
return 0 # pragma: cant happen
return mem_struct.PrivateUsage
elif env.LINUX:
# Linux implementation
import os
_scale = {'kb': 1024, 'mb': 1024*1024}
def _VmB(key):
"""Read the /proc/PID/status file to find memory use."""
try:
# Get pseudo file /proc/<pid>/status
with open('/proc/%d/status' % os.getpid()) as t:
v = t.read()
except IOError: # pragma: cant happen
return 0 # non-Linux?
# Get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(key)
v = v[i:].split(None, 3)
if len(v) < 3: # pragma: part covered
return 0 # pragma: cant happen
# Convert Vm value to bytes.
return int(float(v[1]) * _scale[v[2].lower()])
def process_ram():
"""How much RAM is this process using? (Linux implementation)"""
return _VmB('VmRSS')
else:
# Generic implementation.
def process_ram():
"""How much RAM is this process using? (stdlib implementation)"""
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
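# --- Usage sketch, not part of the original module ---
# process_ram() returns the current process memory usage as an integer. Note
# that the units differ by backend: the Windows and Linux branches above report
# bytes (PrivateUsage / VmRSS), while the resource-based fallback reports
# ru_maxrss, whose unit is platform dependent (kilobytes on Linux, bytes on
# macOS).
if __name__ == "__main__":
    print("process RAM: %d" % process_ram())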
|
import os
import os.path
import re
from configparser import RawConfigParser
from datetime import datetime
from typing import List, Optional
from weblate.vcs.base import Repository, RepositoryException
from weblate.vcs.ssh import SSH_WRAPPER
class HgRepository(Repository):
"""Repository implementation for Mercurial."""
_cmd = "hg"
_cmd_last_revision = ["log", "--limit", "1", "--template", "{node}"]
_cmd_last_remote_revision = [
"log",
"--limit",
"1",
"--template",
"{node}",
"--branch",
".",
]
_cmd_list_changed_files = ["status", "--rev"]
_version = None
name = "Mercurial"
req_version = "2.8"
default_branch = "default"
ref_to_remote = "head() and branch(.) and not closed() - ."
ref_from_remote = "outgoing()"
VERSION_RE = re.compile(r".*\(version ([^)]*)\).*")
def is_valid(self):
"""Check whether this is a valid repository."""
return os.path.exists(os.path.join(self.path, ".hg", "requires"))
def init(self):
"""Initialize the repository."""
self._popen(["init", self.path])
def check_config(self):
"""Check VCS configuration."""
# We directly set the config as it takes the same time as reading it
self.set_config("ui.ssh", SSH_WRAPPER.filename)
@classmethod
def _clone(cls, source: str, target: str, branch: str):
"""Clone repository."""
cls._popen(["clone", "--branch", branch, source, target])
def get_config(self, path):
"""Read entry from configuration."""
result = None
section, option = path.split(".", 1)
filename = os.path.join(self.path, ".hg", "hgrc")
config = RawConfigParser()
config.read(filename)
if config.has_option(section, option):
result = config.get(section, option)
return result
def set_config(self, path, value):
"""Set entry in local configuration."""
if not self.lock.is_locked:
raise RuntimeError("Repository operation without lock held!")
section, option = path.split(".", 1)
filename = os.path.join(self.path, ".hg", "hgrc")
config = RawConfigParser()
config.read(filename)
if not config.has_section(section):
config.add_section(section)
if config.has_option(section, option) and config.get(section, option) == value:
return
config.set(section, option, value)
with open(filename, "w") as handle:
config.write(handle)
def set_committer(self, name, mail):
"""Configure commiter name."""
self.set_config("ui.username", f"{name} <{mail}>")
def reset(self):
"""Reset working copy to match remote branch."""
self.set_config("extensions.strip", "")
self.execute(["update", "--clean", "remote(.)"])
if self.needs_push():
self.execute(["strip", "roots(outgoing())"])
self.clean_revision_cache()
def configure_merge(self):
"""Select the correct merge tool."""
self.set_config("ui.merge", "internal:merge")
merge_driver = self.get_merge_driver("po")
if merge_driver is not None:
self.set_config(
"merge-tools.weblate-merge-gettext-po.executable", merge_driver
)
self.set_config("merge-patterns.**.po", "weblate-merge-gettext-po")
def rebase(self, abort=False):
"""Rebase working copy on top of remote branch."""
self.set_config("extensions.rebase", "")
if abort:
self.execute(["rebase", "--abort"])
elif self.needs_merge():
if self.needs_ff():
self.execute(["update", "--clean", "remote(.)"])
else:
self.configure_merge()
try:
self.execute(["rebase", "-d", "remote(.)"])
except RepositoryException as error:
# Mercurial 3.8 changed error code and output
if (
error.retcode in (1, 255)
and "nothing to rebase" in error.args[0]
):
self.execute(["update", "--clean", "remote(.)"])
return
raise
def merge(self, abort=False, message=None):
"""Merge remote branch or reverts the merge."""
if abort:
self.execute(["update", "--clean", "."])
elif self.needs_merge():
if self.needs_ff():
self.execute(["update", "--clean", "remote(.)"])
else:
self.configure_merge()
# Fallback to merge
try:
self.execute(["merge", "-r", "remote(.)"])
except RepositoryException as error:
if error.retcode == 255:
# Nothing to merge
return
raise
self.execute(["commit", "--message", "Merge"])
def needs_commit(self, filenames: Optional[List[str]] = None):
"""Check whether repository needs commit."""
cmd = ["status", "--"]
if filenames:
cmd.extend(filenames)
status = self.execute(cmd, needs_lock=False)
return status != ""
def _get_revision_info(self, revision):
"""Return dictionary with detailed revision information."""
template = """
author_name: {person(author)}
author_email: {email(author)}
author: {author}
authordate: {rfc822date(date)}
commit_name: {person(author)}
commit_email: {email(author)}
commit: {author}
commitdate: {rfc822date(date)}
shortrevision: {short(node)}
message:
{desc}
"""
text = self.execute(
["log", "--limit", "1", "--template", template, "--rev", revision],
needs_lock=False,
merge_err=False,
)
result = {"revision": revision}
message = []
header = True
for line in text.splitlines():
line = line.strip()
if not line:
continue
if not header:
message.append(line)
continue
if line == "message:":
header = False
continue
name, value = line.strip().split(":", 1)
value = value.strip()
name = name.lower()
result[name] = value
result["message"] = "\n".join(message)
result["summary"] = message[0]
return result
def log_revisions(self, refspec):
"""Return revisin log for given refspec."""
return self.execute(
["log", "--template", "{node}\n", "--rev", refspec],
needs_lock=False,
merge_err=False,
).splitlines()
def needs_ff(self):
"""Check whether repository needs a fast-forward to upstream.
Checks whether the path to the upstream is linear.
"""
return bool(self.log_revisions(".::remote(.) - ."))
@classmethod
def _get_version(cls):
"""Return VCS program version."""
output = cls._popen(["version", "-q"], merge_err=False)
matches = cls.VERSION_RE.match(output)
if matches is None:
raise OSError(f"Failed to parse version string: {output}")
return matches.group(1)
def commit(
self,
message: str,
author: Optional[str] = None,
timestamp: Optional[datetime] = None,
files: Optional[List[str]] = None,
):
"""Create new revision."""
# Build the commit command
cmd = ["commit", "--message", message]
if author is not None:
cmd.extend(["--user", author])
if timestamp is not None:
cmd.extend(["--date", timestamp.ctime()])
# Add files one by one; this has to deal with
# removed, untracked and non-existing files
if files is not None:
for name in files:
try:
self.execute(["add", "--", name])
except RepositoryException:
try:
self.execute(["remove", "--", name])
except RepositoryException:
continue
cmd.append(name)
# Bail out if there is nothing to commit.
# This can easily happen with squashing and reverting changes.
if not self.needs_commit(files):
return
# Execute it
self.execute(cmd)
# Clean cache
self.clean_revision_cache()
def remove(self, files: List[str], message: str, author: Optional[str] = None):
"""Remove files and creates new revision."""
self.execute(["remove", "--force", "--"] + files)
self.commit(message, author)
def configure_remote(
self, pull_url: str, push_url: str, branch: str, fast: bool = True
):
"""Configure remote repository."""
old_pull = self.get_config("paths.default")
old_push = self.get_config("paths.default-push")
if old_pull != pull_url:
# No origin existing or URL changed?
self.set_config("paths.default", pull_url)
if old_push != push_url:
self.set_config("paths.default-push", push_url)
# We also enable some necessary extensions here
self.set_config("extensions.strip", "")
self.set_config("extensions.rebase", "")
self.set_config("experimental.evolution", "all")
self.set_config("phases.publish", "False")
self.branch = branch
def on_branch(self, branch):
return branch == self.execute(["branch"], merge_err=False).strip()
def configure_branch(self, branch):
"""Configure repository branch."""
if not self.on_branch(branch):
self.execute(["update", branch])
self.branch = branch
def describe(self):
"""Verbosely describes current revision."""
return self.execute(
[
"log",
"-r",
".",
"--template",
"{latesttag}-{latesttagdistance}-{node|short}",
],
needs_lock=False,
merge_err=False,
).strip()
def push(self, branch):
"""Push given branch to remote repository."""
try:
self.execute(["push", "-b", self.branch])
except RepositoryException as error:
if error.retcode == 1:
# No changes found
return
raise
def get_file(self, path, revision):
"""Return content of file at given revision."""
return self.execute(
["cat", "--rev", revision, path], needs_lock=False, merge_err=False
)
def cleanup(self):
"""Remove not tracked files from the repository."""
self.set_config("extensions.purge", "")
self.execute(["purge"])
def update_remote(self):
"""Update remote repository."""
self.execute(["pull", "--branch", self.branch])
self.clean_revision_cache()
def parse_changed_files(self, lines):
"""Parses output with chanaged files."""
# Strip action prefix we do not use
yield from (line[2:] for line in lines)
def list_changed_files(self, refspec):
try:
return super().list_changed_files(refspec)
except RepositoryException as error:
if error.retcode == 255:
# Empty revision set
return []
raise
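# --- Illustrative sketch, not part of the original module ---
# parse_changed_files() strips the two-character status prefix that
# ``hg status --rev`` prints in front of each path, e.g. "M po/cs.po" becomes
# "po/cs.po". A standalone re-creation of that behaviour:
if __name__ == "__main__":
    sample_status_lines = ["M po/cs.po", "A po/de.po", "R po/old.po"]
    stripped = [line[2:] for line in sample_status_lines]
    assert stripped == ["po/cs.po", "po/de.po", "po/old.po"]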
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from bind import BindCollector
##########################################################################
class TestBindCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('BindCollector', {
'interval': 10,
})
self.collector = BindCollector(config, None)
def test_import(self):
self.assertTrue(BindCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('bind.xml')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'view._default.resstat.Queryv4': 0.000000,
'view._default.resstat.Queryv6': 0.000000,
'view._default.resstat.Responsev4': 0.000000,
'view._default.resstat.Responsev6': 0.000000,
'view._default.resstat.NXDOMAIN': 0.000000,
'view._default.resstat.SERVFAIL': 0.000000,
'view._default.resstat.FORMERR': 0.000000,
'view._default.resstat.OtherError': 0.000000,
'view._default.resstat.EDNS0Fail': 0.000000,
'view._default.resstat.Mismatch': 0.000000,
'view._default.resstat.Truncated': 0.000000,
'view._default.resstat.Lame': 0.000000,
'view._default.resstat.Retry': 0.000000,
'view._default.resstat.QueryAbort': 0.000000,
'view._default.resstat.QuerySockFail': 0.000000,
'view._default.resstat.QueryTimeout': 0.000000,
'view._default.resstat.GlueFetchv4': 0.000000,
'view._default.resstat.GlueFetchv6': 0.000000,
'view._default.resstat.GlueFetchv4Fail': 0.000000,
'view._default.resstat.GlueFetchv6Fail': 0.000000,
'view._default.resstat.ValAttempt': 0.000000,
'view._default.resstat.ValOk': 0.000000,
'view._default.resstat.ValNegOk': 0.000000,
'view._default.resstat.ValFail': 0.000000,
'view._default.resstat.QryRTT10': 0.000000,
'view._default.resstat.QryRTT100': 0.000000,
'view._default.resstat.QryRTT500': 0.000000,
'view._default.resstat.QryRTT800': 0.000000,
'view._default.resstat.QryRTT1600': 0.000000,
'view._default.resstat.QryRTT1600+': 0.000000,
'requests.QUERY': 0.000000,
'queries.A': 0.000000,
'nsstat.Requestv4': 0.000000,
'nsstat.Requestv6': 0.000000,
'nsstat.ReqEdns0': 0.000000,
'nsstat.ReqBadEDNSVer': 0.000000,
'nsstat.ReqTSIG': 0.000000,
'nsstat.ReqSIG0': 0.000000,
'nsstat.ReqBadSIG': 0.000000,
'nsstat.ReqTCP': 0.000000,
'nsstat.AuthQryRej': 0.000000,
'nsstat.RecQryRej': 0.000000,
'nsstat.XfrRej': 0.000000,
'nsstat.UpdateRej': 0.000000,
'nsstat.Response': 0.000000,
'nsstat.TruncatedResp': 0.000000,
'nsstat.RespEDNS0': 0.000000,
'nsstat.RespTSIG': 0.000000,
'nsstat.RespSIG0': 0.000000,
'nsstat.QrySuccess': 0.000000,
'nsstat.QryAuthAns': 0.000000,
'nsstat.QryNoauthAns': 0.000000,
'nsstat.QryReferral': 0.000000,
'nsstat.QryNxrrset': 0.000000,
'nsstat.QrySERVFAIL': 0.000000,
'nsstat.QryFORMERR': 0.000000,
'nsstat.QryNXDOMAIN': 0.000000,
'nsstat.QryRecursion': 0.000000,
'nsstat.QryDuplicate': 0.000000,
'nsstat.QryDropped': 0.000000,
'nsstat.QryFailure': 0.000000,
'nsstat.XfrReqDone': 0.000000,
'nsstat.UpdateReqFwd': 0.000000,
'nsstat.UpdateRespFwd': 0.000000,
'nsstat.UpdateFwdFail': 0.000000,
'nsstat.UpdateDone': 0.000000,
'nsstat.UpdateFail': 0.000000,
'nsstat.UpdateBadPrereq': 0.000000,
'zonestat.NotifyOutv4': 0.000000,
'zonestat.NotifyOutv6': 0.000000,
'zonestat.NotifyInv4': 0.000000,
'zonestat.NotifyInv6': 0.000000,
'zonestat.NotifyRej': 0.000000,
'zonestat.SOAOutv4': 0.000000,
'zonestat.SOAOutv6': 0.000000,
'zonestat.AXFRReqv4': 0.000000,
'zonestat.AXFRReqv6': 0.000000,
'zonestat.IXFRReqv4': 0.000000,
'zonestat.IXFRReqv6': 0.000000,
'zonestat.XfrSuccess': 0.000000,
'zonestat.XfrFail': 0.000000,
'sockstat.UDP4Open': 0.000000,
'sockstat.UDP6Open': 0.000000,
'sockstat.TCP4Open': 0.000000,
'sockstat.TCP6Open': 0.000000,
'sockstat.UnixOpen': 0.000000,
'sockstat.UDP4OpenFail': 0.000000,
'sockstat.UDP6OpenFail': 0.000000,
'sockstat.TCP4OpenFail': 0.000000,
'sockstat.TCP6OpenFail': 0.000000,
'sockstat.UnixOpenFail': 0.000000,
'sockstat.UDP4Close': 0.000000,
'sockstat.UDP6Close': 0.000000,
'sockstat.TCP4Close': 0.000000,
'sockstat.TCP6Close': 0.000000,
'sockstat.UnixClose': 0.000000,
'sockstat.FDWatchClose': 0.000000,
'sockstat.UDP4BindFail': 0.000000,
'sockstat.UDP6BindFail': 0.000000,
'sockstat.TCP4BindFail': 0.000000,
'sockstat.TCP6BindFail': 0.000000,
'sockstat.UnixBindFail': 0.000000,
'sockstat.FdwatchBindFail': 0.000000,
'sockstat.UDP4ConnFail': 0.000000,
'sockstat.UDP6ConnFail': 0.000000,
'sockstat.TCP4ConnFail': 0.000000,
'sockstat.TCP6ConnFail': 0.000000,
'sockstat.UnixConnFail': 0.000000,
'sockstat.FDwatchConnFail': 0.000000,
'sockstat.UDP4Conn': 0.000000,
'sockstat.UDP6Conn': 0.000000,
'sockstat.TCP4Conn': 0.000000,
'sockstat.TCP6Conn': 0.000000,
'sockstat.UnixConn': 0.000000,
'sockstat.FDwatchConn': 0.000000,
'sockstat.TCP4AcceptFail': 0.000000,
'sockstat.TCP6AcceptFail': 0.000000,
'sockstat.UnixAcceptFail': 0.000000,
'sockstat.TCP4Accept': 0.000000,
'sockstat.TCP6Accept': 0.000000,
'sockstat.UnixAccept': 0.000000,
'sockstat.UDP4SendErr': 0.000000,
'sockstat.UDP6SendErr': 0.000000,
'sockstat.TCP4SendErr': 0.000000,
'sockstat.TCP6SendErr': 0.000000,
'sockstat.UnixSendErr': 0.000000,
'sockstat.FDwatchSendErr': 0.000000,
'sockstat.UDP4RecvErr': 0.000000,
'sockstat.UDP6RecvErr': 0.000000,
'sockstat.TCP4RecvErr': 0.000000,
'sockstat.TCP6RecvErr': 0.000000,
'sockstat.UnixRecvErr': 0.000000,
'sockstat.FDwatchRecvErr': 0.000000,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import argparse
from pathlib import Path
import subprocess
import sys
from . import docs, error, gather_info, generate
from .const import COMPONENT_DIR
TEMPLATES = [
p.name for p in (Path(__file__).parent / "templates").glob("*") if p.is_dir()
]
def valid_integration(integration):
"""Test if it's a valid integration."""
if not (COMPONENT_DIR / integration).exists():
raise argparse.ArgumentTypeError(
f"The integration {integration} does not exist."
)
return integration
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
parser = argparse.ArgumentParser(description="Home Assistant Scaffolder")
parser.add_argument("template", type=str, choices=TEMPLATES)
parser.add_argument(
"--develop", action="store_true", help="Automatically fill in info"
)
parser.add_argument(
"--integration", type=valid_integration, help="Integration to target."
)
arguments = parser.parse_args()
return arguments
def main():
"""Scaffold an integration."""
if not Path("requirements_all.txt").is_file():
print("Run from project root")
return 1
args = get_arguments()
info = gather_info.gather_info(args)
print()
# If we are calling scaffold on a non-existing integration,
# we're going to first make it. If we're making an integration,
# we will also make a config flow to go with it.
if info.is_new:
generate.generate("integration", info)
# If it's a new integration and it's not a config flow,
# create a config flow too.
if not args.template.startswith("config_flow"):
if info.oauth2:
template = "config_flow_oauth2"
elif info.authentication or not info.discoverable:
template = "config_flow"
else:
template = "config_flow_discovery"
generate.generate(template, info)
# If we wanted a new integration, we've already done our work.
if args.template != "integration":
generate.generate(args.template, info)
pipe_null = {} if args.develop else {"stdout": subprocess.DEVNULL}
print("Running hassfest to pick up new information.")
subprocess.run(["python", "-m", "script.hassfest"], **pipe_null)
print()
print("Running gen_requirements_all to pick up new information.")
subprocess.run(["python", "-m", "script.gen_requirements_all"], **pipe_null)
print()
print("Running script/translations_develop to pick up new translation strings.")
subprocess.run(
[
"python",
"-m",
"script.translations",
"develop",
"--integration",
info.domain,
],
**pipe_null,
)
print()
if args.develop:
print("Running tests")
print(f"$ pytest -vvv tests/components/{info.domain}")
subprocess.run(["pytest", "-vvv", "tests/components/{info.domain}"])
print()
docs.print_relevant_docs(args.template, info)
return 0
if __name__ == "__main__":
try:
sys.exit(main())
except error.ExitApp as err:
print()
print(f"Fatal Error: {err.reason}")
sys.exit(err.exit_code)
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainercv.links import FasterRCNNFPNResNet101
from chainercv.links import FasterRCNNFPNResNet50
from chainercv.links import MaskRCNNFPNResNet101
from chainercv.links import MaskRCNNFPNResNet50
from chainercv.utils.testing import attr
@testing.parameterize(*testing.product({
'model': [FasterRCNNFPNResNet50, FasterRCNNFPNResNet101,
MaskRCNNFPNResNet50, MaskRCNNFPNResNet101],
'n_fg_class': [1, 5, 20],
}))
class TestFasterRCNNFPNResNet(unittest.TestCase):
def setUp(self):
self.link = self.model(
n_fg_class=self.n_fg_class, min_size=66)
def _check_call(self):
imgs = [
np.random.uniform(-1, 1, size=(3, 48, 48)).astype(np.float32),
np.random.uniform(-1, 1, size=(3, 32, 64)).astype(np.float32),
]
x, _ = self.link.prepare(imgs)
with chainer.using_config('train', False):
self.link(self.link.xp.array(x))
@attr.slow
@attr.pfnci_skip
def test_call_cpu(self):
self._check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
@testing.parameterize(*testing.product({
'model': [FasterRCNNFPNResNet50, FasterRCNNFPNResNet101,
MaskRCNNFPNResNet50, MaskRCNNFPNResNet101],
'n_fg_class': [None, 10, 80],
'pretrained_model': ['coco', 'imagenet'],
}))
class TestFasterRCNNFPNResNetPretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_fg_class': self.n_fg_class,
'pretrained_model': self.pretrained_model,
}
if self.pretrained_model == 'coco':
valid = self.n_fg_class in {None, 80}
elif self.pretrained_model == 'imagenet':
valid = self.n_fg_class is not None
if valid:
self.model(**kwargs)
else:
with self.assertRaises(ValueError):
self.model(**kwargs)
testing.run_module(__name__, __file__)
|
import sys
import mock
from mock import patch
from pytest import raises
from paasta_tools.cli.cmds import sysdig
@patch("paasta_tools.cli.cmds.sysdig.get_subparser", autospec=True)
def test_add_subparser(mock_get_subparser):
mock_subparsers = mock.Mock()
sysdig.add_subparser(mock_subparsers)
assert mock_get_subparser.called
@patch("paasta_tools.cli.cmds.sysdig.client", autospec=True)
def test_get_status_for_instance(mock_client):
mock_client.get_paasta_oapi_client.return_value = None
with raises(SystemExit):
sysdig.get_status_for_instance("cluster1", "my-service", "main")
mock_client.get_paasta_oapi_client.assert_called_with(cluster="cluster1")
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(service=mock_api)
mock_api.status_instance.return_value = mock.Mock(marathon=False)
with raises(SystemExit):
sysdig.get_status_for_instance("cluster1", "my-service", "main")
mock_api.status_instance.assert_called_with(service="my-service", instance="main")
@patch.object(sys, "argv", ["paasta", "sysdig", "blah", "blah"])
@patch("paasta_tools.cli.cmds.sysdig.load_marathon_service_config", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.load_system_paasta_config", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.format_mesos_command", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.get_mesos_master", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig._run", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.get_any_mesos_master", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.subprocess", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.pick_slave_from_status", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.get_status_for_instance", autospec=True)
def test_paasta_sysdig(
mock_get_status_for_instance,
mock_pick_slave_from_status,
mock_subprocess,
mock_get_any_mesos_master,
mock__run,
mock_get_mesos_master,
mock_format_mesos_command,
mock_load_system_paasta_config,
mock_load_marathon_service_config,
):
mock_status = mock.Mock(marathon=mock.Mock(app_id="appID1"))
mock_get_status_for_instance.return_value = mock_status
mock_args = mock.Mock(
cluster="cluster1",
service="mock_service",
instance="mock_instance",
host="host1",
mesos_id=None,
local=False,
)
mock_pick_slave_from_status.return_value = "host1"
mock_get_any_mesos_master.return_value = "master1"
mock__run.return_value = (0, "slave:command123")
sysdig.paasta_sysdig(mock_args)
mock_get_any_mesos_master.assert_called_with(
cluster="cluster1",
system_paasta_config=mock_load_system_paasta_config.return_value,
)
mock__run.assert_called_with(
"ssh -At -o StrictHostKeyChecking=no -o LogLevel=QUIET master1 "
'"sudo paasta sysdig blah blah --local"'
)
mock_subprocess.call.assert_called_with(["ssh", "-tA", "slave", "command123"])
mock__run.return_value = (1, "slave:command123")
with raises(SystemExit):
sysdig.paasta_sysdig(mock_args)
mock_args = mock.Mock(
cluster="cluster1",
service="mock_service",
instance="mock_instance",
host="host1",
mesos_id=None,
local=True,
)
mock_pick_slave_from_status.return_value = "slave1"
fake_server_config = {"url": ["http://blah"], "user": "user", "password": "pass"}
mock_load_system_paasta_config.return_value.get_marathon_servers = mock.Mock(
return_value=[fake_server_config]
)
mock_load_system_paasta_config.return_value.get_previous_marathon_servers = mock.Mock(
return_value=[fake_server_config]
)
mock_load_marathon_service_config().get_marathon_shard.return_value = None
mock_get_mesos_master.return_value = mock.Mock(host="http://foo")
sysdig.paasta_sysdig(mock_args)
mock_get_status_for_instance.assert_called_with(
cluster="cluster1", service="mock_service", instance="mock_instance"
)
mock_pick_slave_from_status.assert_called_with(status=mock_status, host="host1")
mock_format_mesos_command.assert_called_with(
"slave1", "appID1", "http://foo", "http://user:pass@blah"
)
def test_format_mesos_command():
ret = sysdig.format_mesos_command(
"slave1", "appID1", "http://foo", "http://user:pass@blah"
)
expected = 'slave1:sudo csysdig -m http://foo,http://user:pass@blah marathon.app.id="/appID1" -v mesos_tasks'
assert ret == expected
@patch("paasta_tools.cli.cmds.sysdig.calculate_remote_masters", autospec=True)
@patch("paasta_tools.cli.cmds.sysdig.find_connectable_master", autospec=True)
def test_get_any_mesos_master(
mock_find_connectable_master, mock_calculate_remote_masters, system_paasta_config
):
mock_calculate_remote_masters.return_value = ([], "fakeERROR")
with raises(SystemExit):
sysdig.get_any_mesos_master("cluster1", system_paasta_config)
mock_calculate_remote_masters.assert_called_with("cluster1", system_paasta_config)
mock_masters = mock.Mock()
mock_calculate_remote_masters.return_value = (mock_masters, "fake")
mock_find_connectable_master.return_value = (False, "fakeERROR")
with raises(SystemExit):
sysdig.get_any_mesos_master("cluster1", system_paasta_config)
mock_master = mock.Mock()
mock_find_connectable_master.return_value = (mock_master, "fake")
assert sysdig.get_any_mesos_master("cluster1", system_paasta_config) == mock_master
|
import diamond.collector
import urllib2
from StringIO import StringIO
import re
import xml.etree.cElementTree as ElementTree
class EndecaDgraphCollector(diamond.collector.Collector):
# ignore these elements, because they are of no use
IGNORE_ELEMENTS = [
'most_expensive_queries',
'general_information',
'analytics_performance',
'disk_usage',
'configupdates',
'xqueryconfigupdates',
'spelling_updates',
'precomputed_sorts',
'analytics_performance',
'cache_slices',
]
# ignore these metrics, because they can be generated by graphite
IGNORE_STATS = [
'name',
'units',
]
# set of regular expressions for matching & sub'ing.
NUMVAL_MATCH = re.compile(r'^[\d\.e\-\+]*$')
CHAR_BLACKLIST = re.compile(r'\-|\ |,|:|/|>|\(|\)')
UNDERSCORE_UNDUPE = re.compile('_+')
# endeca xml namespace
XML_NS = '{http://xmlns.endeca.com/ene/dgraph}'
def get_default_config_help(self):
config_help = super(EndecaDgraphCollector,
self).get_default_config_help()
config_help.update({
'host': "Hostname of Endeca Dgraph instance",
'port': "Port of the Dgraph API listener",
'timeout': "Timeout for http API calls",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EndecaDgraphCollector, self).get_default_config()
config.update({
'path': 'endeca.dgraph',
'host': 'localhost',
'port': 8080,
'timeout': 1,
})
return config
def collect(self):
def makeSane(stat):
stat = self.CHAR_BLACKLIST.sub('_', stat.lower())
stat = self.UNDERSCORE_UNDUPE.sub('_', stat)
return stat
def createKey(element):
if element.attrib.get("name"):
key = element.attrib.get("name")
key = makeSane(key)
else:
key = element.tag[len(self.XML_NS):]
return key
def processElem(elem, keyList):
for k, v in elem.items():
prefix = '.'.join(keyList)
if k not in self.IGNORE_ELEMENTS and self.NUMVAL_MATCH.match(v):
k = makeSane(k)
self.publish('%s.%s' % (prefix, k), v)
def walkXML(context, elemList):
try:
for event, elem in context:
elemName = createKey(elem)
if event == 'start':
elemList.append(elemName)
if len(elem) == 0:
if set(elemList).intersection(self.IGNORE_ELEMENTS):
continue
processElem(elem, elemList)
elif event == 'end':
elemList.pop()
except Exception as e:
self.log.error('Something went wrong: %s', e)
url = 'http://%s:%d/admin?op=stats' % (self.config['host'],
self.config['port'])
try:
xml = urllib2.urlopen(url, timeout=self.config['timeout']).read()
except Exception as e:
self.log.error('Could not connect to endeca on %s: %s' % (url, e))
return {}
context = ElementTree.iterparse(StringIO(xml), events=('start', 'end'))
elemList = []
walkXML(context, elemList)
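# --- Illustrative sketch, not part of the original module ---
# Before publishing, the collector lower-cases stat names, replaces blacklisted
# characters with underscores and collapses runs of underscores. A standalone
# re-creation of makeSane() using the same regular expressions:
if __name__ == "__main__":
    char_blacklist = re.compile(r'\-|\ |,|:|/|>|\(|\)')
    underscore_undupe = re.compile(r'_+')
    name = 'Cache Hits (total)'
    sane = underscore_undupe.sub('_', char_blacklist.sub('_', name.lower()))
    assert sane == 'cache_hits_total_'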
|
class AutoTermSelector(object):
'''
Reduce a corpus to roughly the X most important terms; useful for plotting a big corpus.
Will return between X/2 and X terms.
Returns the terms with the highest absolute scores, the highest scaled f-scores against the background corpus, and the highest term frequencies.
'''
@staticmethod
def reduce_terms(term_doc_matrix, scores, num_term_to_keep=None):
'''
Parameters
----------
term_doc_matrix: TermDocMatrix or descendant
scores: array-like
Same length as number of terms in TermDocMatrix.
num_term_to_keep: int, default=4000.
Should be > 0. Number of terms to keep. Will keep between num_term_to_keep/2 and num_term_to_keep.
Returns
-------
TermDocMatrix stripped of non-important terms.
'''
terms_to_show = AutoTermSelector.get_selected_terms(
term_doc_matrix, scores, num_term_to_keep)
return term_doc_matrix.remove_terms(set(term_doc_matrix.get_terms())
- set(terms_to_show))
@staticmethod
def get_selected_terms(term_doc_matrix, scores, num_term_to_keep=None):
'''
Parameters
----------
term_doc_matrix: TermDocMatrix or descendant
scores: array-like
Same length as number of terms in TermDocMatrix.
num_term_to_keep: int, default=4000.
Should be > 0. Number of terms to keep. Will keep between num_term_to_keep/2 and num_term_to_keep.
Returns
-------
set, terms that should be shown
'''
num_term_to_keep = AutoTermSelector._add_default_num_terms_to_keep(num_term_to_keep)
term_doc_freq = term_doc_matrix.get_term_freq_df()
term_doc_freq['count'] = term_doc_freq.sum(axis=1)
term_doc_freq['score'] = scores
score_terms = AutoTermSelector._get_score_terms(num_term_to_keep, term_doc_freq)
background_terms = AutoTermSelector._get_background_terms(num_term_to_keep, term_doc_matrix)
frequent_terms = AutoTermSelector._get_frequent_terms(num_term_to_keep, term_doc_freq)
terms_to_show = score_terms | background_terms | frequent_terms
return terms_to_show
@staticmethod
def _get_frequent_terms(num_term_to_keep, term_doc_freq):
return (term_doc_freq
.sort_values(by='count', ascending=False)
.iloc[:int(0.125 * num_term_to_keep)].index)
@staticmethod
def _get_background_terms(num_term_to_keep, term_doc_matrix):
return (term_doc_matrix.get_scaled_f_scores_vs_background()
.iloc[:int(0.375 * num_term_to_keep)].index)
@staticmethod
def _get_score_terms(num_term_to_keep, term_doc_freq):
sorted_tdf = term_doc_freq.sort_values(by='score', ascending=False)
return (sorted_tdf.iloc[:int(0.25 * num_term_to_keep)].index
| sorted_tdf.iloc[-int(0.25 * num_term_to_keep):].index)
@staticmethod
def _add_default_num_terms_to_keep(num_term_to_keep):
if num_term_to_keep is None:
num_term_to_keep = 4000
return num_term_to_keep
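# --- Illustrative sketch, not part of the original module ---
# The selection budget above is split across three criteria before the union is
# taken: 25% + 25% of num_term_to_keep from the top and bottom of the score
# ranking, 37.5% from the scaled-f-score ranking against the background corpus,
# and 12.5% from raw term frequency. The two score slices are disjoint, which
# gives the num_term_to_keep / 2 lower bound; overlap with the other two sets
# keeps the union at or below num_term_to_keep.
if __name__ == '__main__':
    num_term_to_keep = 4000
    budget = (2 * int(0.25 * num_term_to_keep)   # score extremes
              + int(0.375 * num_term_to_keep)    # background terms
              + int(0.125 * num_term_to_keep))   # frequent terms
    assert budget == num_term_to_keep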
|
from datetime import timedelta
import logging
from songpal import (
ConnectChange,
ContentChange,
PowerChange,
SongpalException,
VolumeChange,
)
from homeassistant.components import media_player, songpal
from homeassistant.components.songpal.const import SET_SOUND_SETTING
from homeassistant.components.songpal.media_player import SUPPORT_SONGPAL
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from . import (
CONF_DATA,
CONF_ENDPOINT,
CONF_NAME,
ENDPOINT,
ENTITY_ID,
FRIENDLY_NAME,
MAC,
MODEL,
SW_VERSION,
_create_mocked_device,
_patch_media_player_device,
)
from tests.async_mock import AsyncMock, MagicMock, call, patch
from tests.common import MockConfigEntry, async_fire_time_changed
def _get_attributes(hass):
state = hass.states.get(ENTITY_ID)
return state.as_dict()["attributes"]
async def test_setup_platform(hass):
"""Test the legacy setup platform."""
mocked_device = _create_mocked_device(throw_exception=True)
with _patch_media_player_device(mocked_device):
await async_setup_component(
hass,
media_player.DOMAIN,
{
media_player.DOMAIN: [
{
"platform": songpal.DOMAIN,
CONF_NAME: FRIENDLY_NAME,
CONF_ENDPOINT: ENDPOINT,
}
],
},
)
await hass.async_block_till_done()
# No device is set up
mocked_device.assert_not_called()
all_states = hass.states.async_all()
assert len(all_states) == 0
async def test_setup_failed(hass, caplog):
"""Test failed to set up the entity."""
mocked_device = _create_mocked_device(throw_exception=True)
entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
entry.add_to_hass(hass)
with _patch_media_player_device(mocked_device):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
warning_records = [x for x in caplog.records if x.levelno == logging.WARNING]
assert len(warning_records) == 2
assert not any(x.levelno == logging.ERROR for x in caplog.records)
caplog.clear()
utcnow = dt_util.utcnow()
type(mocked_device).get_supported_methods = AsyncMock()
with _patch_media_player_device(mocked_device):
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert not any(x.levelno == logging.WARNING for x in caplog.records)
assert not any(x.levelno == logging.ERROR for x in caplog.records)
async def test_state(hass):
"""Test state of the entity."""
mocked_device = _create_mocked_device()
entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
entry.add_to_hass(hass)
with _patch_media_player_device(mocked_device):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.name == FRIENDLY_NAME
assert state.state == STATE_ON
attributes = state.as_dict()["attributes"]
assert attributes["volume_level"] == 0.5
assert attributes["is_volume_muted"] is False
assert attributes["source_list"] == ["title1", "title2"]
assert attributes["source"] == "title2"
assert attributes["supported_features"] == SUPPORT_SONGPAL
device_registry = await dr.async_get_registry(hass)
device = device_registry.async_get_device(
identifiers={(songpal.DOMAIN, MAC)}, connections={}
)
assert device.connections == {(dr.CONNECTION_NETWORK_MAC, MAC)}
assert device.manufacturer == "Sony Corporation"
assert device.name == FRIENDLY_NAME
assert device.sw_version == SW_VERSION
assert device.model == MODEL
entity_registry = await er.async_get_registry(hass)
entity = entity_registry.async_get(ENTITY_ID)
assert entity.unique_id == MAC
async def test_services(hass):
"""Test services."""
mocked_device = _create_mocked_device()
entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
entry.add_to_hass(hass)
with _patch_media_player_device(mocked_device):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
async def _call(service, **argv):
await hass.services.async_call(
media_player.DOMAIN,
service,
{"entity_id": ENTITY_ID, **argv},
blocking=True,
)
await _call(media_player.SERVICE_TURN_ON)
await _call(media_player.SERVICE_TURN_OFF)
await _call(media_player.SERVICE_TOGGLE)
assert mocked_device.set_power.call_count == 3
mocked_device.set_power.assert_has_calls([call(True), call(False), call(False)])
await _call(media_player.SERVICE_VOLUME_SET, volume_level=0.6)
await _call(media_player.SERVICE_VOLUME_UP)
await _call(media_player.SERVICE_VOLUME_DOWN)
assert mocked_device.volume1.set_volume.call_count == 3
mocked_device.volume1.set_volume.assert_has_calls([call(60), call(51), call(49)])
await _call(media_player.SERVICE_VOLUME_MUTE, is_volume_muted=True)
mocked_device.volume1.set_mute.assert_called_once_with(True)
await _call(media_player.SERVICE_SELECT_SOURCE, source="none")
mocked_device.input1.activate.assert_not_called()
await _call(media_player.SERVICE_SELECT_SOURCE, source="title1")
mocked_device.input1.activate.assert_called_once()
await hass.services.async_call(
songpal.DOMAIN,
SET_SOUND_SETTING,
{"entity_id": ENTITY_ID, "name": "name", "value": "value"},
blocking=True,
)
mocked_device.set_sound_settings.assert_called_once_with("name", "value")
mocked_device.set_sound_settings.reset_mock()
mocked_device2 = _create_mocked_device()
sys_info = MagicMock()
sys_info.macAddr = "mac2"
sys_info.version = SW_VERSION
type(mocked_device2).get_system_info = AsyncMock(return_value=sys_info)
entry2 = MockConfigEntry(
domain=songpal.DOMAIN, data={CONF_NAME: "d2", CONF_ENDPOINT: ENDPOINT}
)
entry2.add_to_hass(hass)
with _patch_media_player_device(mocked_device2):
await hass.config_entries.async_setup(entry2.entry_id)
await hass.async_block_till_done()
await hass.services.async_call(
songpal.DOMAIN,
SET_SOUND_SETTING,
{"entity_id": "all", "name": "name", "value": "value"},
blocking=True,
)
mocked_device.set_sound_settings.assert_called_once_with("name", "value")
mocked_device2.set_sound_settings.assert_called_once_with("name", "value")
async def test_websocket_events(hass):
"""Test websocket events."""
mocked_device = _create_mocked_device()
entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
entry.add_to_hass(hass)
with _patch_media_player_device(mocked_device):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
mocked_device.listen_notifications.assert_called_once()
assert mocked_device.on_notification.call_count == 4
notification_callbacks = mocked_device.notification_callbacks
volume_change = MagicMock()
volume_change.mute = True
volume_change.volume = 20
await notification_callbacks[VolumeChange](volume_change)
attributes = _get_attributes(hass)
assert attributes["is_volume_muted"] is True
assert attributes["volume_level"] == 0.2
content_change = MagicMock()
content_change.is_input = False
content_change.uri = "uri1"
await notification_callbacks[ContentChange](content_change)
assert _get_attributes(hass)["source"] == "title2"
content_change.is_input = True
await notification_callbacks[ContentChange](content_change)
assert _get_attributes(hass)["source"] == "title1"
power_change = MagicMock()
power_change.status = False
await notification_callbacks[PowerChange](power_change)
assert hass.states.get(ENTITY_ID).state == STATE_OFF
async def test_disconnected(hass, caplog):
"""Test disconnected behavior."""
mocked_device = _create_mocked_device()
entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
entry.add_to_hass(hass)
with _patch_media_player_device(mocked_device):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
async def _assert_state():
state = hass.states.get(ENTITY_ID)
assert state.state == STATE_UNAVAILABLE
connect_change = MagicMock()
connect_change.exception = "disconnected"
type(mocked_device).get_supported_methods = AsyncMock(
side_effect=[SongpalException(""), SongpalException(""), _assert_state]
)
notification_callbacks = mocked_device.notification_callbacks
with patch("homeassistant.components.songpal.media_player.INITIAL_RETRY_DELAY", 0):
await notification_callbacks[ConnectChange](connect_change)
warning_records = [x for x in caplog.records if x.levelno == logging.WARNING]
assert len(warning_records) == 2
assert warning_records[0].message.endswith("Got disconnected, trying to reconnect")
assert warning_records[1].message.endswith("Connection reestablished")
assert not any(x.levelno == logging.ERROR for x in caplog.records)
|
from contextvars import ContextVar
from aiohttp import web
from homeassistant.components.http.request_context import setup_request_context
async def test_request_context_middleware(aiohttp_client):
"""Test that request context is set from middleware."""
context = ContextVar("request", default=None)
app = web.Application()
async def mock_handler(request):
"""Return the real IP as text."""
request_context = context.get()
assert request_context
assert request_context == request
return web.Response(text="hi!")
app.router.add_get("/", mock_handler)
setup_request_context(app, context)
mock_api_client = await aiohttp_client(app)
resp = await mock_api_client.get("/")
assert resp.status == 200
text = await resp.text()
assert text == "hi!"
# We are outside of the context here, should be None
assert context.get() is None
|
import os
import pytest
import nikola.plugins.command.init
from nikola import __main__
from .helper import add_post_without_text, cd, copy_example_post
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
with cd(target_dir):
__main__.main(["build"])
def prepare_demo_site(target_dir):
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
posts_dir = os.path.join(target_dir, "posts")
copy_example_post(posts_dir)
add_post_without_text(posts_dir)
|
import warnings
from collections import OrderedDict
from django.core import checks
from django.db import models
from django.utils.translation import gettext_lazy as _
from shop import deferred
from shop.models.fields import JSONField
from shop.models.customer import CustomerModel
from shop.models.product import BaseProduct
from shop.modifiers.pool import cart_modifiers_pool
from shop.money import Money
class CartItemManager(models.Manager):
"""
Customized model manager for our CartItem model.
"""
def get_or_create(self, **kwargs):
"""
Create a unique cart item. If the same product already exists in the given cart,
increase its quantity instead of creating a new item.
"""
cart = kwargs.pop('cart')
product = kwargs.pop('product')
quantity = int(kwargs.pop('quantity', 1))
# add a new item to the cart, or reuse an existing one, increasing the quantity
watched = not quantity
cart_item = product.is_in_cart(cart, watched=watched, **kwargs)
if cart_item:
if not watched:
cart_item.quantity += quantity
created = False
else:
cart_item = self.model(cart=cart, product=product, quantity=quantity, **kwargs)
created = True
cart_item.save()
return cart_item, created
def filter_cart_items(self, cart, request):
"""
Use this method to fetch items for shopping from the cart. It rearranges the result set
according to the defined modifiers.
"""
cart_items = self.filter(cart=cart, quantity__gt=0).order_by('updated_at')
for modifier in cart_modifiers_pool.get_all_modifiers():
cart_items = modifier.arrange_cart_items(cart_items, request)
return cart_items
def filter_watch_items(self, cart, request):
"""
Use this method to fetch items from the watch list. It rearranges the result set
according to the defined modifiers.
"""
watch_items = self.filter(cart=cart, quantity=0)
for modifier in cart_modifiers_pool.get_all_modifiers():
watch_items = modifier.arrange_watch_items(watch_items, request)
return watch_items
class BaseCartItem(models.Model, metaclass=deferred.ForeignKeyBuilder):
"""
This is a holder for the quantity of items in the cart and, obviously, a
pointer to the actual Product being purchased
"""
cart = deferred.ForeignKey(
'BaseCart',
on_delete=models.CASCADE,
related_name='items',
)
product = deferred.ForeignKey(
BaseProduct,
on_delete=models.CASCADE,
)
product_code = models.CharField(
_("Product code"),
max_length=255,
null=True,
blank=True,
help_text=_("Product code of added item."),
)
updated_at = models.DateTimeField(
_("Updated at"),
auto_now=True,
)
extra = JSONField(verbose_name=_("Arbitrary information for this cart item"))
objects = CartItemManager()
class Meta:
abstract = True
verbose_name = _("Cart item")
verbose_name_plural = _("Cart items")
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
allowed_types = ['IntegerField', 'SmallIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'DecimalField', 'FloatField']
for field in cls._meta.fields:
if field.attname == 'quantity':
if field.get_internal_type() not in allowed_types:
msg = "Class `{}.quantity` must be of one of the types: {}."
errors.append(checks.Error(msg.format(cls.__name__, allowed_types)))
break
else:
msg = "Class `{}` must implement a field named `quantity`."
errors.append(checks.Error(msg.format(cls.__name__)))
return errors
def __init__(self, *args, **kwargs):
# reduce the given fields to what the model actually can consume
all_field_names = [field.name for field in self._meta.get_fields(include_parents=True)]
model_kwargs = {k: v for k, v in kwargs.items() if k in all_field_names}
super().__init__(*args, **model_kwargs)
self.extra_rows = OrderedDict()
self._dirty = True
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.cart.save(update_fields=['updated_at'])
self._dirty = True
def update(self, request):
"""
Loop over all registered cart modifier, change the price per cart item and optionally add
some extra rows.
"""
if not self._dirty:
return
self.refresh_from_db()
self.extra_rows = OrderedDict() # reset the dictionary
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.process_cart_item(self, request)
self._dirty = False
CartItemModel = deferred.MaterializedModel(BaseCartItem)
class CartManager(models.Manager):
"""
The Model Manager for any Cart inheriting from BaseCart.
"""
def get_from_request(self, request):
"""
Return the cart for current customer.
"""
if request.customer.is_visitor:
raise self.model.DoesNotExist("Cart for visiting customer does not exist.")
if not hasattr(request, '_cached_cart') or request._cached_cart.customer.user_id != request.customer.user_id:
request._cached_cart, created = self.get_or_create(customer=request.customer)
return request._cached_cart
def get_or_create_from_request(self, request):
has_cached_cart = hasattr(request, '_cached_cart')
if request.customer.is_visitor:
request.customer = CustomerModel.objects.get_or_create_from_request(request)
has_cached_cart = False
if not has_cached_cart or request._cached_cart.customer.user_id != request.customer.user_id:
request._cached_cart, created = self.get_or_create(customer=request.customer)
return request._cached_cart
class BaseCart(models.Model, metaclass=deferred.ForeignKeyBuilder):
"""
The fundamental part of a shopping cart.
"""
customer = deferred.OneToOneField(
'BaseCustomer',
on_delete=models.CASCADE,
related_name='cart',
verbose_name=_("Customer"),
)
created_at = models.DateTimeField(
_("Created at"),
auto_now_add=True,
)
updated_at = models.DateTimeField(
_("Updated at"),
auto_now=True,
)
extra = JSONField(verbose_name=_("Arbitrary information for this cart"))
# our CartManager determines the cart object from the request.
objects = CartManager()
class Meta:
abstract = True
verbose_name = _("Shopping Cart")
verbose_name_plural = _("Shopping Carts")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# That will hold things like tax totals or total discount
self.extra_rows = OrderedDict()
self._cached_cart_items = None
self._dirty = True
def save(self, force_update=False, *args, **kwargs):
if self.pk or force_update is False:
super().save(force_update=force_update, *args, **kwargs)
self._dirty = True
def update(self, request, raise_exception=False):
"""
This should be called after a cart item changed quantity, has been added or removed.
It will loop over all items in the cart, and call all the configured cart modifiers.
After this is done, it will compute and update the order's total and subtotal fields, along
with any supplement added along the way by modifiers.
Note that these added fields are not stored - we actually want to
reflect rebate and tax changes on the *cart* items, but we don't want
that for the order items (since they are legally binding after the
"purchase" button was pressed)
"""
if not self._dirty:
return
if self._cached_cart_items:
items = self._cached_cart_items
else:
items = CartItemModel.objects.filter_cart_items(self, request)
# This calls all the pre_process_cart methods and the pre_process_cart_item for each item,
# before processing the cart. This allows preparing and collecting data on the cart.
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.pre_process_cart(self, request, raise_exception)
for item in items:
modifier.pre_process_cart_item(self, item, request, raise_exception)
self.extra_rows = OrderedDict() # reset the dictionary
self.subtotal = 0 # reset the subtotal
for item in items:
# item.update iterates over all cart modifiers and invokes method `process_cart_item`
item.update(request)
self.subtotal += item.line_total
# Iterate over the registered modifiers, to process the cart's summary
for modifier in cart_modifiers_pool.get_all_modifiers():
for item in items:
modifier.post_process_cart_item(self, item, request)
modifier.process_cart(self, request)
# This calls the post_process_cart method from cart modifiers, if any.
# It allows for a last bit of processing on the "finished" cart, before
# it is displayed
for modifier in reversed(cart_modifiers_pool.get_all_modifiers()):
modifier.post_process_cart(self, request)
# Cache updated cart items
self._cached_cart_items = items
self._dirty = False
def empty(self):
"""
Remove the cart with all its items.
"""
if self.pk:
self.items.all().delete()
self.delete()
def merge_with(self, other_cart):
"""
Merge the contents of the other cart into this one, afterwards delete it.
This is done item by item, so that duplicate items increase the quantity.
"""
# iterate over the cart and add quantities for items from other cart considered as equal
if self.id == other_cart.id:
raise RuntimeError("Can not merge cart with itself")
for item in self.items.all():
other_item = item.product.is_in_cart(other_cart, extra=item.extra)
if other_item:
item.quantity += other_item.quantity
item.save()
other_item.delete()
# the remaining items from the other cart are merged into this one
other_cart.items.update(cart=self)
other_cart.delete()
def __str__(self):
return "{}".format(self.pk) if self.pk else "(unsaved)"
@property
def num_items(self):
"""
Returns the number of items in the cart.
"""
return self.items.filter(quantity__gt=0).count()
@property
def total_quantity(self):
"""
Returns the total quantity of all items in the cart.
"""
aggr = self.items.aggregate(quantity=models.Sum('quantity'))
return aggr['quantity'] or 0
# if we knew that self.items was already evaluated, this might be faster:
# return sum([ci.quantity for ci in self.items.all()])
@property
def is_empty(self):
return self.num_items == 0 and self.total_quantity == 0
def get_caption_data(self):
warnings.warn("This method is deprecated")
return {'num_items': self.num_items, 'total_quantity': self.total_quantity,
'subtotal': self.subtotal, 'total': self.total}
@classmethod
def get_default_caption_data(cls):
warnings.warn("This method is deprecated")
return {'num_items': 0, 'total_quantity': 0, 'subtotal': Money(), 'total': Money()}
CartModel = deferred.MaterializedModel(BaseCart)
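# --- Illustrative sketch, not part of the original module ---
# BaseCart.update() drives every registered cart modifier through the hooks
# used above: pre_process_cart / pre_process_cart_item before any computation,
# process_cart_item and post_process_cart_item per item, and process_cart /
# post_process_cart for the summary. A hypothetical no-op modifier shaped after
# those calls (real modifiers derive from django-shop's modifier base class and
# are registered through the cart modifiers pool):
class SketchCartModifier:
    def pre_process_cart(self, cart, request, raise_exception=False):
        pass
    def pre_process_cart_item(self, cart, cart_item, request, raise_exception=False):
        pass
    def process_cart_item(self, cart_item, request):
        # e.g. compute cart_item.line_total from product price and quantity
        pass
    def post_process_cart_item(self, cart, cart_item, request):
        pass
    def process_cart(self, cart, request):
        pass
    def post_process_cart(self, cart, request):
        pass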
|
from os.path import exists, splitext
from markups import get_markup_for_file_name, find_markup_class_by_name
from markups.common import MODULE_HOME_PAGE
from ReText import app_version, globalSettings, converterprocess
from ReText.editor import ReTextEdit
from ReText.highlighter import ReTextHighlighter
from ReText.preview import ReTextPreview
try:
import enchant
except ImportError:
enchant = None
from PyQt5.QtCore import pyqtSignal, Qt, QDir, QFile, QFileInfo, QPoint, QTextStream, QTimer, QUrl
from PyQt5.QtGui import QPalette, QTextCursor, QTextDocument
from PyQt5.QtWidgets import QApplication, QTextEdit, QSplitter, QMessageBox
try:
from ReText.webkitpreview import ReTextWebKitPreview
except ImportError:
ReTextWebKitPreview = None
try:
from ReText.webenginepreview import ReTextWebEnginePreview
except ImportError:
ReTextWebEnginePreview = None
PreviewDisabled, PreviewLive, PreviewNormal = range(3)
class ReTextTab(QSplitter):
fileNameChanged = pyqtSignal()
modificationStateChanged = pyqtSignal()
activeMarkupChanged = pyqtSignal()
# Make _fileName a read-only property to make sure that any
# modification happens through the proper functions. These functions
# will make sure that the fileNameChanged signal is emitted when
# applicable.
@property
def fileName(self):
return self._fileName
def __init__(self, parent, fileName, previewState=PreviewDisabled):
super().__init__(Qt.Horizontal, parent=parent)
self.p = parent
self._fileName = fileName
self.editBox = ReTextEdit(self)
self.previewBox = self.createPreviewBox(self.editBox)
self.activeMarkupClass = None
self.markup = None
self.converted = None
self.previewState = previewState
self.previewOutdated = False
self.conversionPending = False
self.cssFileExists = False
self.converterProcess = converterprocess.ConverterProcess()
self.converterProcess.conversionDone.connect(self.updatePreviewBox)
textDocument = self.editBox.document()
self.highlighter = ReTextHighlighter(textDocument)
if enchant is not None and parent.actionEnableSC.isChecked():
self.highlighter.dictionary = enchant.Dict(parent.sl or None)
# Rehighlighting is tied to the change in markup class that
# happens at the end of this function
self.editBox.textChanged.connect(self.triggerPreviewUpdate)
self.editBox.undoAvailable.connect(parent.actionUndo.setEnabled)
self.editBox.redoAvailable.connect(parent.actionRedo.setEnabled)
self.editBox.copyAvailable.connect(parent.enableCopy)
# Give both boxes a minimum size so the minimumSizeHint will be
# ignored when splitter.setSizes is called below
for widget in self.editBox, self.previewBox:
widget.setMinimumWidth(125)
self.addWidget(widget)
self.setSizes((50, 50))
self.setChildrenCollapsible(False)
textDocument.modificationChanged.connect(self.handleModificationChanged)
self.updateActiveMarkupClass()
def handleModificationChanged(self):
self.modificationStateChanged.emit()
def createPreviewBox(self, editBox):
# Use closures to avoid a hard reference from ReTextWebKitPreview
# to self, which would keep the tab and its resources alive
# even after other references to it have disappeared.
def editorPositionToSourceLine(editorPosition):
viewportPosition = editorPosition - editBox.verticalScrollBar().value()
sourceLine = editBox.cursorForPosition(QPoint(0,viewportPosition)).blockNumber()
return sourceLine
def sourceLineToEditorPosition(sourceLine):
doc = editBox.document()
block = doc.findBlockByNumber(sourceLine)
rect = doc.documentLayout().blockBoundingRect(block)
return rect.top()
if ReTextWebKitPreview and globalSettings.useWebKit:
preview = ReTextWebKitPreview(self,
editorPositionToSourceLine,
sourceLineToEditorPosition)
elif ReTextWebEnginePreview and globalSettings.useWebEngine:
preview = ReTextWebEnginePreview(self,
editorPositionToSourceLine,
sourceLineToEditorPosition)
else:
preview = ReTextPreview(self)
return preview
def getActiveMarkupClass(self):
'''
Return the currently active markup class for this tab.
No objects should be created of this class, it should
only be used to retrieve markup class specific information.
'''
return self.activeMarkupClass
def updateActiveMarkupClass(self):
'''
Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted.
'''
previousMarkupClass = self.activeMarkupClass
self.activeMarkupClass = find_markup_class_by_name(globalSettings.defaultMarkup)
if self._fileName:
markupClass = get_markup_for_file_name(
self._fileName, return_class=True)
if markupClass:
self.activeMarkupClass = markupClass
if self.activeMarkupClass != previousMarkupClass:
self.highlighter.docType = self.activeMarkupClass.name if self.activeMarkupClass else None
self.highlighter.rehighlight()
self.activeMarkupChanged.emit()
self.triggerPreviewUpdate()
def getDocumentTitleFromConverted(self, converted):
if converted:
try:
return converted.get_document_title()
except Exception:
self.p.printError()
return self.getBaseName()
def getBaseName(self):
if self._fileName:
fileinfo = QFileInfo(self._fileName)
basename = fileinfo.completeBaseName()
return (basename if basename else fileinfo.fileName())
return self.tr("New document")
def getHtmlFromConverted(self, converted, includeStyleSheet=True, webenv=False):
if converted is None:
markupClass = self.getActiveMarkupClass()
errMsg = self.tr('Could not parse file contents, check if '
'you have the <a href="%s">necessary module</a> '
'installed!')
try:
errMsg %= markupClass.attributes[MODULE_HOME_PAGE]
except (AttributeError, KeyError):
# Remove the link if markupClass doesn't have the needed attribute
errMsg = errMsg.replace('<a href="%s">', '').replace('</a>', '')
return '<p style="color: red">%s</p>' % errMsg
headers = ''
if includeStyleSheet and self.p.ss is not None:
headers += '<style type="text/css">\n' + self.p.ss + '</style>\n'
elif includeStyleSheet:
style = 'td, th { border: 1px solid #c3c3c3; padding: 0 3px 0 3px; }\n'
style += 'table { border-collapse: collapse; }\n'
style += 'img { max-width: 100%; }\n'
# QTextDocument seems to use media=screen even for printing
if globalSettings.useWebKit:
# https://github.com/retext-project/retext/pull/187
palette = QApplication.palette()
style += '@media screen { html { color: %s; } }\n' % palette.color(QPalette.WindowText).name()
# https://github.com/retext-project/retext/issues/408
style += '@media print { html { background-color: white; } }\n'
headers += '<style type="text/css">\n' + style + '</style>\n'
baseName = self.getBaseName()
if self.cssFileExists:
headers += ('<link rel="stylesheet" type="text/css" href="%s.css">\n'
% baseName)
headers += ('<meta name="generator" content="ReText %s">\n' % app_version)
return converted.get_whole_html(
custom_headers=headers, include_stylesheet=includeStyleSheet,
fallback_title=baseName, webenv=webenv)
def getDocumentForExport(self, includeStyleSheet=True, webenv=False):
markupClass = self.getActiveMarkupClass()
if markupClass and markupClass.available():
exportMarkup = markupClass(filename=self._fileName)
text = self.editBox.toPlainText()
converted = exportMarkup.convert(text)
else:
converted = None
return (self.getDocumentTitleFromConverted(converted),
self.getHtmlFromConverted(converted, includeStyleSheet=includeStyleSheet, webenv=webenv),
self.previewBox)
def updatePreviewBox(self):
self.conversionPending = False
try:
self.converted = self.converterProcess.get_result()
except converterprocess.MarkupNotAvailableError:
self.converted = None
except converterprocess.ConversionError:
return self.p.printError()
if isinstance(self.previewBox, QTextEdit):
scrollbar = self.previewBox.verticalScrollBar()
scrollbarValue = scrollbar.value()
distToBottom = scrollbar.maximum() - scrollbarValue
try:
html = self.getHtmlFromConverted(self.converted)
except Exception:
return self.p.printError()
if isinstance(self.previewBox, QTextEdit):
self.previewBox.setHtml(html)
self.previewBox.document().setDefaultFont(globalSettings.font)
# If scrollbar was at bottom (and that was not the same as top),
# set it to bottom again
if scrollbarValue:
newValue = scrollbar.maximum() - distToBottom
scrollbar.setValue(newValue)
else:
self.previewBox.updateFontSettings()
# Always provide a baseUrl otherwise QWebView will
# refuse to show images or other external objects
if self._fileName:
baseUrl = QUrl.fromLocalFile(self._fileName)
else:
baseUrl = QUrl.fromLocalFile(QDir.currentPath())
self.previewBox.setHtml(html, baseUrl)
if self.previewOutdated:
self.triggerPreviewUpdate()
def triggerPreviewUpdate(self):
self.previewOutdated = True
if self.previewState == PreviewDisabled:
return
if not self.conversionPending:
self.conversionPending = True
QTimer.singleShot(500, self.startPendingConversion)
def startPendingConversion(self):
self.previewOutdated = False
requested_extensions = ['ReText.mdx_posmap'] if globalSettings.syncScroll else []
self.converterProcess.start_conversion(self.getActiveMarkupClass().name,
self.fileName,
requested_extensions,
self.editBox.toPlainText(),
QDir.currentPath())
def updateBoxesVisibility(self):
self.editBox.setVisible(self.previewState < PreviewNormal)
self.previewBox.setVisible(self.previewState > PreviewDisabled)
def rebuildPreviewBox(self):
self.previewBox.disconnectExternalSignals()
self.previewBox.setParent(None)
self.previewBox.deleteLater()
self.previewBox = self.createPreviewBox(self.editBox)
self.previewBox.setMinimumWidth(125)
self.addWidget(self.previewBox)
self.setSizes((50, 50))
self.triggerPreviewUpdate()
self.updateBoxesVisibility()
def detectFileEncoding(self, fileName):
'''
Detect content encoding of specific file.
It will return None if it can't determine the encoding.
'''
try:
import chardet
except ImportError:
return
with open(fileName, 'rb') as inputFile:
raw = inputFile.read(2048)
result = chardet.detect(raw)
if result['confidence'] > 0.9:
if result['encoding'].lower() == 'ascii':
# UTF-8 files can be falsely detected as ASCII files if they
# don't contain non-ASCII characters in the first 2048 bytes.
# We map ASCII to UTF-8 to avoid such situations.
return 'utf-8'
return result['encoding']
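# Illustrative note (sketch, not part of the original code): chardet returns a
# mapping such as {'encoding': 'ascii', 'confidence': 1.0}. Because pure ASCII
# bytes are also valid UTF-8, reporting 'utf-8' here keeps a later save from
# re-encoding a UTF-8 file that simply had no non-ASCII characters yet.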
def readTextFromFile(self, fileName=None, encoding=None):
previousFileName = self._fileName
if fileName:
self._fileName = fileName
# Only try to detect encoding if it is not specified
if encoding is None and globalSettings.detectEncoding:
encoding = self.detectFileEncoding(self._fileName)
# TODO: why do we open the file twice: for detecting encoding
# and for actual read? Can we open it just once?
openfile = QFile(self._fileName)
openfile.open(QFile.ReadOnly)
stream = QTextStream(openfile)
encoding = encoding or globalSettings.defaultCodec
if encoding:
stream.setCodec(encoding)
# If encoding is specified or detected, we should save the file with
# the same encoding
self.editBox.document().setProperty("encoding", encoding)
text = stream.readAll()
openfile.close()
if previousFileName != self._fileName:
self.updateActiveMarkupClass()
self.editBox.setPlainText(text)
self.editBox.document().setModified(False)
cssFileName = self.getBaseName() + '.css'
self.cssFileExists = QFile.exists(cssFileName)
if previousFileName != self._fileName:
self.fileNameChanged.emit()
def writeTextToFile(self, fileName=None):
# Just writes the text to file, without any changes to tab object
# Used directly for e.g. export extensions
# Get text from the cursor to avoid tweaking special characters,
# see https://bugreports.qt.io/browse/QTBUG-57552 and
# https://github.com/retext-project/retext/issues/216
cursor = self.editBox.textCursor()
cursor.select(QTextCursor.Document)
text = cursor.selectedText().replace('\u2029', '\n')
savefile = QFile(fileName or self._fileName)
result = savefile.open(QFile.WriteOnly)
if result:
savestream = QTextStream(savefile)
# Save the file with original encoding
encoding = self.editBox.document().property("encoding")
if encoding is not None:
savestream.setCodec(encoding)
savestream << text
savefile.close()
return result
def saveTextToFile(self, fileName=None):
# Sets fileName as tab fileName and writes the text to that file
if self._fileName:
self.p.fileSystemWatcher.removePath(self._fileName)
result = self.writeTextToFile(fileName)
if result:
self.editBox.document().setModified(False)
self.p.fileSystemWatcher.addPath(fileName or self._fileName)
if fileName and self._fileName != fileName:
self._fileName = fileName
self.updateActiveMarkupClass()
self.fileNameChanged.emit()
return result
def goToLine(self,line):
block = self.editBox.document().findBlockByLineNumber(line)
if block.isValid():
newCursor = QTextCursor(block)
self.editBox.setTextCursor(newCursor)
def find(self, text, flags, replaceText=None, wrap=False):
cursor = self.editBox.textCursor()
if wrap and flags & QTextDocument.FindBackward:
cursor.movePosition(QTextCursor.End)
elif wrap:
cursor.movePosition(QTextCursor.Start)
if replaceText is not None and cursor.selectedText() == text:
newCursor = cursor
else:
newCursor = self.editBox.document().find(text, cursor, flags)
if not newCursor.isNull():
if replaceText is not None:
newCursor.insertText(replaceText)
newCursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, len(replaceText))
newCursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor, len(replaceText))
self.editBox.setTextCursor(newCursor)
if self.editBox.cursorRect().bottom() >= self.editBox.height() - 3:
scrollValue = self.editBox.verticalScrollBar().value()
areaHeight = self.editBox.fontMetrics().height()
self.editBox.verticalScrollBar().setValue(scrollValue + areaHeight)
return True
if not wrap:
return self.find(text, flags, replaceText, True)
return False
def replaceAll(self, text, replaceText):
cursor = self.editBox.textCursor()
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.Start)
flags = QTextDocument.FindFlags()
cursor = lastCursor = self.editBox.document().find(text, cursor, flags)
while not cursor.isNull():
cursor.insertText(replaceText)
lastCursor = cursor
cursor = self.editBox.document().find(text, cursor, flags)
if not lastCursor.isNull():
lastCursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, len(replaceText))
lastCursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor, len(replaceText))
self.editBox.setTextCursor(lastCursor)
self.editBox.textCursor().endEditBlock()
return not lastCursor.isNull()
def openSourceFile(self, linkPath):
"""Finds and opens the source file for link target fileToOpen.
When links like [test](test) are clicked, the file test.md is opened.
It has to be located next to the current opened file.
Relative paths like [test](../test) or [test](folder/test) are also possible.
"""
fileToOpen = self.resolveSourceFile(linkPath)
if exists(fileToOpen) and get_markup_for_file_name(fileToOpen, return_class=True):
self.p.openFileWrapper(fileToOpen)
return fileToOpen
if get_markup_for_file_name(fileToOpen, return_class=True):
if not QFile.exists(fileToOpen) and QFileInfo(fileToOpen).dir().exists():
if self.promptFileCreation(fileToOpen):
self.p.openFileWrapper(fileToOpen)
return fileToOpen
def promptFileCreation(self, fileToCreate):
"""
Prompt user if a file should be created for the clicked link,
and try to create it. Return True on success.
"""
buttonReply = QMessageBox.question(self, self.tr('Create missing file?'),
self.tr("The file '%s' does not exist.\n\nDo you want to create it?") % fileToCreate,
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if buttonReply == QMessageBox.Yes:
return self.createFile(fileToCreate)
elif buttonReply == QMessageBox.No:
return False
def resolveSourceFile(self, linkPath):
"""
Finds the actual path of the file to open in a new tab.
When the link has no extension, eg [Test](test), or points to an html file, eg [Test](test.html),
the extension of the current file is assumed (eg test.md for a markdown file).
Relative paths like [test](../test) or [test](folder/test) are also possible.
"""
basename, ext = splitext(linkPath)
if self.fileName:
currentExt = splitext(self.fileName)[1]
if ext in ('.html', '') and (exists(basename+currentExt) or not exists(linkPath)):
ext = currentExt
return basename+ext
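# Hedged example (illustrative values): with the current tab showing notes.md,
# a clicked link target of 'chapter1' or 'chapter1.html' resolves to
# 'chapter1.md', while 'img/logo.png' keeps its own extension.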
def createFile(self, fileToCreate):
"""Try to create file, return True if successful"""
try:
# Create file:
open(fileToCreate, 'x').close()
return True
except OSError as err:
QMessageBox.warning(self, self.tr("File could not be created"),
self.tr("Could not create file '%s': %s") % (fileToCreate, err))
return False
|
import discord
from redbot.core import commands
from redbot.core.i18n import Translator
from .installable import InstalledModule
_ = Translator("Koala", __file__)
class InstalledCog(InstalledModule):
@classmethod
async def convert(cls, ctx: commands.Context, arg: str) -> InstalledModule:
downloader = ctx.bot.get_cog("Downloader")
if downloader is None:
raise commands.CommandError(_("No Downloader cog found."))
cog = discord.utils.get(await downloader.installed_cogs(), name=arg)
if cog is None:
raise commands.BadArgument(
_("Cog `{cog_name}` is not installed.").format(cog_name=arg)
)
return cog
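# Hedged usage sketch (hypothetical command, not part of Downloader): discord.py
# style converters such as InstalledCog are used as parameter annotations, and
# the framework calls convert() on the raw argument string for us, e.g.:
#
#     @commands.command()
#     async def coginfo(self, ctx: commands.Context, cog: InstalledCog):
#         await ctx.send(f"{cog.name} is installed")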
|
import tensorflow as tf
from tensorflow.keras.layers import Layer # type: ignore
from tensorflow.keras import activations
from tensorflow.keras import initializers
from typing import List, Optional, Text, Tuple
import tensornetwork as tn
from tensornetwork.network_components import Node
import numpy as np
import math
# pytype: disable=module-attr
@tf.keras.utils.register_keras_serializable(package='tensornetwork')
# pytype: enable=module-attr
class DenseMPO(Layer):
"""Matrix Product Operator (MPO) TN layer.
This layer can take an input shape of arbitrary dimension, with the first
dimension expected to be a batch dimension. The weight matrix will be
constructed from and applied to the last input dimension.
Example:
::
# as first layer in a sequential model:
model = Sequential()
model.add(
DenseMPO(1024, num_nodes=5, bond_dim=8, activation='relu',
input_shape=(1024,)))
# now the model will take as input arrays of shape (*, 1024)
# and output arrays of shape (*, 1024).
# After the first layer, you don't need to specify
# the size of the input anymore:
model.add(DenseMPO(1024, num_nodes=5, bond_dim=8, activation='relu'))
Args:
output_dim: Positive integer, dimensionality of the output space.
num_nodes: Positive integer, number of nodes in the MPO.
Note input_shape[-1]**(1. / num_nodes) and output_dim**(1. / num_nodes)
must both be round.
bond_dim: Positive integer, size of the intermediate dimension.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the node weight matrices.
bias_initializer: Initializer for the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., output_dim)`.
"""
def __init__(self,
output_dim: int,
num_nodes: int,
bond_dim: int,
use_bias: Optional[bool] = True,
activation: Optional[Text] = None,
kernel_initializer: Optional[Text] = 'glorot_uniform',
bias_initializer: Optional[Text] = 'zeros',
**kwargs) -> None:
# Allow specification of input_dim instead of input_shape,
# for compatibility with Keras layers that support this
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
assert num_nodes > 2, 'Need at least 3 nodes to create MPO.'
super().__init__(**kwargs)
self.output_dim = output_dim
self.num_nodes = num_nodes
self.bond_dim = bond_dim
self.nodes = []
self.use_bias = use_bias
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape: List[int]) -> None:
# Disable the attribute-defined-outside-init violations in this function
# pylint: disable=attribute-defined-outside-init
if input_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
# Try to convert n to an integer. tensorflow.compat.v1 uses a partially
# integer compatible interface that does not implement the __pow__
# function. __int__ is implemented, so calling this first is necessary.
input_dim = int(input_shape[-1])
def is_perfect_root(n, n_nodes):
root = n**(1. / n_nodes)
return round(root)**n_nodes == n
# Ensure the MPO dimensions will work
assert is_perfect_root(input_dim, self.num_nodes), \
f'Input dim incorrect.\
{input_dim}**(1. / {self.num_nodes}) must be round.'
assert is_perfect_root(self.output_dim, self.num_nodes), \
f'Output dim incorrect. \
{self.output_dim}**(1. / {self.num_nodes}) must be round.'
super().build(input_shape)
self.in_leg_dim = math.ceil(input_dim**(1. / self.num_nodes))
self.out_leg_dim = math.ceil(self.output_dim**(1. / self.num_nodes))
self.nodes.append(
self.add_weight(name='end_node_first',
shape=(self.in_leg_dim, self.bond_dim,
self.out_leg_dim),
trainable=True,
initializer=self.kernel_initializer))
for i in range(self.num_nodes - 2):
self.nodes.append(
self.add_weight(name=f'middle_node_{i}',
shape=(self.in_leg_dim, self.bond_dim, self.bond_dim,
self.out_leg_dim),
trainable=True,
initializer=self.kernel_initializer))
self.nodes.append(
self.add_weight(name='end_node_last',
shape=(self.in_leg_dim, self.bond_dim,
self.out_leg_dim),
trainable=True,
initializer=self.kernel_initializer))
self.bias_var = self.add_weight(
name='bias',
shape=(self.output_dim,),
trainable=True,
initializer=self.bias_initializer) if self.use_bias else None
def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: # pylint: disable=unused-argument
def f(x: tf.Tensor, nodes: List[Node], num_nodes: int, in_leg_dim: int,
output_dim: int, use_bias: bool, bias_var: tf.Tensor) -> tf.Tensor:
l = [in_leg_dim] * num_nodes
input_reshaped = tf.reshape(x, tuple(l))
x_node = tn.Node(input_reshaped, name='xnode', backend="tensorflow")
tn_nodes = []
for i, v in enumerate(nodes):
tn_nodes.append(tn.Node(v, name=f'node_{i}', backend="tensorflow"))
# Connect every node to input node
x_node[i] ^ tn_nodes[i][0]
# Connect all core nodes
tn_nodes[0][1] ^ tn_nodes[1][1]
for i, _ in enumerate(tn_nodes):
if len(tn_nodes[i].shape) == 4:
tn_nodes[i][2] ^ tn_nodes[i + 1][1]
# The TN should now look like this
# | | |
# 1 --- 2 --- ...
# \ / /
# x
# Contract TN using zipper algorithm
temp = x_node @ tn_nodes[0]
for i in range(1, len(tn_nodes)):
temp = temp @ tn_nodes[i]
result = tf.reshape(temp.tensor, (-1, output_dim))
if use_bias:
result += bias_var
return result
input_shape = list(inputs.shape)
inputs = tf.reshape(inputs, (-1, input_shape[-1]))
result = tf.vectorized_map(
lambda vec: f(vec, self.nodes, self.num_nodes, self.in_leg_dim, self.
output_dim, self.use_bias, self.bias_var), inputs)
if self.activation is not None:
result = self.activation(result)
result = tf.reshape(result, [-1] + input_shape[1:-1] + [self.output_dim,])
return result
def compute_output_shape(self, input_shape: List[int]) -> Tuple[int, int]:
return tuple(input_shape[0:-1]) + (self.output_dim,)
def get_config(self) -> dict:
"""Returns the config of the layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
Python dictionary containing the configuration of the layer.
"""
config = {}
# Include the MPO-specific arguments
args = ['output_dim', 'num_nodes', 'bond_dim', 'use_bias']
for arg in args:
config[arg] = getattr(self, arg)
# Serialize the activation
config['activation'] = activations.serialize(getattr(self, 'activation'))
# Serialize the initializers
custom_initializers = ['kernel_initializer', 'bias_initializer']
for initializer_arg in custom_initializers:
config[initializer_arg] = initializers.serialize(
getattr(self, initializer_arg))
# Get base config
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
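# Minimal usage sketch (illustrative, assuming tensorflow and tensornetwork are
# installed): dimensions are chosen so the perfect-root constraint holds, since
# 256 ** (1. / 4) == 4 for both the input and output legs.
if __name__ == '__main__':
  mpo_layer = DenseMPO(output_dim=256, num_nodes=4, bond_dim=8, activation='relu')
  sample = tf.random.uniform((2, 256))
  # Expected output shape: (2, 256)
  print(mpo_layer(sample).shape)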
|
from django.db import migrations
def migrate_dictionary(apps, schema_editor):
Dictionary = apps.get_model("trans", "Dictionary")
Project = apps.get_model("trans", "Project")
Change = apps.get_model("trans", "Change")
Glossary = apps.get_model("glossary", "Glossary")
Term = apps.get_model("glossary", "Term")
db_alias = schema_editor.connection.alias
# Create glossaries for all projects
glossaries = {
project.pk: Glossary.objects.create(
name=project.name, color="silver", project=project
)
for project in Project.objects.using(db_alias).iterator()
}
# Migrate dictionary to terms
for dictionary in Dictionary.objects.using(db_alias).iterator():
# Create new term
term = Term.objects.create(
glossary=glossaries[dictionary.project_id],
language=dictionary.language,
source=dictionary.source,
target=dictionary.target,
)
# Adjust change links to terms
Change.objects.filter(dictionary=dictionary).update(glossary_term=term)
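# Illustrative note (sketch): RunPython below registers migrate_dictionary as
# the forward operation with a no-op reverse, and elidable=True allows Django to
# drop the step entirely when the app's migrations are later squashed.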
class Migration(migrations.Migration):
dependencies = [
("glossary", "0001_initial"),
("trans", "0085_change_glossary_term"),
]
operations = [
migrations.RunPython(
migrate_dictionary, migrations.RunPython.noop, elidable=True
)
]
|
from datetime import timedelta
from homeassistant.const import CONF_NAME
from . import CONF_WALLETS, IotaDevice
ATTR_TESTNET = "testnet"
ATTR_URL = "url"
CONF_IRI = "iri"
CONF_SEED = "seed"
CONF_TESTNET = "testnet"
SCAN_INTERVAL = timedelta(minutes=3)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IOTA sensor."""
iota_config = discovery_info
sensors = [
IotaBalanceSensor(wallet, iota_config) for wallet in iota_config[CONF_WALLETS]
]
sensors.append(IotaNodeSensor(iota_config=iota_config))
add_entities(sensors)
class IotaBalanceSensor(IotaDevice):
"""Implement an IOTA sensor for displaying wallets balance."""
def __init__(self, wallet_config, iota_config):
"""Initialize the sensor."""
super().__init__(
name=wallet_config[CONF_NAME],
seed=wallet_config[CONF_SEED],
iri=iota_config[CONF_IRI],
is_testnet=iota_config[CONF_TESTNET],
)
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Balance"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "IOTA"
def update(self):
"""Fetch new balance from IRI."""
self._state = self.api.get_inputs()["totalBalance"]
class IotaNodeSensor(IotaDevice):
"""Implement an IOTA sensor for displaying attributes of node."""
def __init__(self, iota_config):
"""Initialize the sensor."""
super().__init__(
name="Node Info",
seed=None,
iri=iota_config[CONF_IRI],
is_testnet=iota_config[CONF_TESTNET],
)
self._state = None
self._attr = {ATTR_URL: self.iri, ATTR_TESTNET: self.is_testnet}
@property
def name(self):
"""Return the name of the sensor."""
return "IOTA Node"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._attr
def update(self):
"""Fetch new attributes IRI node."""
node_info = self.api.get_node_info()
self._state = node_info.get("appVersion")
# convert values to raw string formats
self._attr.update({k: str(v) for k, v in node_info.items()})
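# Hedged configuration sketch (YAML keys inferred from the constants above; the
# platform is set up through discovery from the component-level `iota` config):
#
#   iota:
#     iri: "http://localhost:14265"
#     testnet: false
#     wallets:
#       - name: "Main Wallet"
#         seed: !secret iota_seed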
|
import subprocess
import string
import diamond.collector
class ScribeCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(ScribeCollector, self).get_default_config_help()
config_help.update({
'scribe_ctrl_bin': 'Path to scribe_ctrl binary',
'scribe_port': 'Scribe port',
})
return config_help
def get_default_config(self):
config = super(ScribeCollector, self).get_default_config()
config.update({
'path': 'scribe',
'scribe_ctrl_bin': self.find_binary('/usr/sbin/scribe_ctrl'),
'scribe_port': None,
})
return config
def key_to_metric(self, key):
"""Replace all non-letter characters with underscores"""
return ''.join(l if l in string.letters else '_' for l in key)
def get_scribe_ctrl_output(self):
cmd = [self.config['scribe_ctrl_bin'], 'counters']
if self.config['scribe_port'] is not None:
cmd.append(self.config['scribe_port'])
self.log.debug("Running command %r", cmd)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
self.log.exception("Unable to run %r", cmd)
return ""
stdout, stderr = p.communicate()
if p.wait() != 0:
self.log.warning("Command failed %r", cmd)
self.log.warning(stderr)
return stdout
def get_scribe_stats(self):
output = self.get_scribe_ctrl_output()
data = {}
for line in output.splitlines():
key, val = line.rsplit(':', 1)
metric = self.key_to_metric(key)
data[metric] = int(val)
return data.items()
def collect(self):
for stat, val in self.get_scribe_stats():
self.publish(stat, val)
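# Illustrative sketch (example output, not captured from a real scribe daemon):
# `scribe_ctrl counters` prints "key: value" lines such as
#
#   received good: 123
#   sent: 120
#
# which key_to_metric() converts to 'received_good' and 'sent' before the
# integer values are published.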
|
import numpy as np
from .. utils import logger, verbose, _pl
@verbose
def peak_finder(x0, thresh=None, extrema=1, verbose=None):
"""Noise-tolerant fast peak-finding algorithm.
Parameters
----------
x0 : 1d array
A real vector from which the maxima will be found (required).
thresh : float
The amount above surrounding data for a peak to be
identified (default = (max(x0)-min(x0))/4). Larger values mean
the algorithm is more selective in finding peaks.
extrema : {-1, 1}
1 if maxima are desired, -1 if minima are desired
(default = maxima, 1).
%(verbose)s
Returns
-------
peak_loc : array
The indices of the identified peaks in x0.
peak_mag : array
The magnitude of the identified peaks.
Notes
-----
If repeated values are found the first is identified as the peak.
Conversion from initial Matlab code from:
Nathanael C. Yoder ([email protected])
Examples
--------
>>> import numpy as np
>>> from mne.preprocessing import peak_finder
>>> t = np.arange(0, 3, 0.01)
>>> x = np.sin(np.pi*t) - np.sin(0.5*np.pi*t)
>>> peak_locs, peak_mags = peak_finder(x) # doctest: +SKIP
>>> peak_locs # doctest: +SKIP
array([36, 260]) # doctest: +SKIP
>>> peak_mags # doctest: +SKIP
array([0.36900026, 1.76007351]) # doctest: +SKIP
"""
x0 = np.asanyarray(x0)
s = x0.size
if x0.ndim >= 2 or s == 0:
raise ValueError('The input data must be a non empty 1D vector')
if thresh is None:
thresh = (np.max(x0) - np.min(x0)) / 4
logger.debug('Peak finder automatic threshold: %0.2g' % (thresh,))
assert extrema in [-1, 1]
if extrema == -1:
x0 = extrema * x0 # Make it so we are finding maxima regardless
dx0 = np.diff(x0) # Find derivative
# This is so we find the first of repeated values
dx0[dx0 == 0] = -np.finfo(float).eps
# Find where the derivative changes sign
ind = np.where(dx0[:-1:] * dx0[1::] < 0)[0] + 1
# Include endpoints in potential peaks and valleys
x = np.concatenate((x0[:1], x0[ind], x0[-1:]))
ind = np.concatenate(([0], ind, [s - 1]))
del x0
# x only has the peaks, valleys, and endpoints
length = x.size
min_mag = np.min(x)
if length > 2: # Function with peaks and valleys
# Set initial parameters for loop
temp_mag = min_mag
found_peak = False
left_min = min_mag
# Deal with the first point a little differently since we tacked it on.
# Calculate the sign of the derivative: since we tacked the first point
# on, it does not necessarily alternate like the rest.
signDx = np.sign(np.diff(x[:3]))
if signDx[0] <= 0: # The first point is larger or equal to the second
ii = -1
if signDx[0] == signDx[1]: # Want alternating signs
x = np.concatenate((x[:1], x[2:]))
ind = np.concatenate((ind[:1], ind[2:]))
length -= 1
else: # First point is smaller than the second
ii = 0
if signDx[0] == signDx[1]: # Want alternating signs
x = x[1:]
ind = ind[1:]
length -= 1
# Preallocate max number of maxima
maxPeaks = int(np.ceil(length / 2.0))
peak_loc = np.zeros(maxPeaks, dtype=np.int64)
peak_mag = np.zeros(maxPeaks)
c_ind = 0
# Loop through extrema which should be peaks and then valleys
while ii < (length - 1):
ii += 1 # This is a peak
# Reset peak finding if we had a peak and the next peak is bigger
# than the last or the left min was small enough to reset.
if found_peak and ((x[ii] > peak_mag[-1]) or
(left_min < peak_mag[-1] - thresh)):
temp_mag = min_mag
found_peak = False
# Make sure we don't iterate past the length of our vector
if ii == length - 1:
break # We assign the last point differently out of the loop
# Found a new peak that is larger than temp_mag and more than thresh
# above the minimum to its left.
if (x[ii] > temp_mag) and (x[ii] > left_min + thresh):
temp_loc = ii
temp_mag = x[ii]
ii += 1 # Move onto the valley
# Come down at least thresh from peak
if not found_peak and (temp_mag > (thresh + x[ii])):
found_peak = True # We have found a peak
left_min = x[ii]
peak_loc[c_ind] = temp_loc # Add peak to index
peak_mag[c_ind] = temp_mag
c_ind += 1
elif x[ii] < left_min: # New left minima
left_min = x[ii]
# Check end point
if (x[-1] > temp_mag) and (x[-1] > (left_min + thresh)):
peak_loc[c_ind] = length - 1
peak_mag[c_ind] = x[-1]
c_ind += 1
elif not found_peak and temp_mag > min_mag:
# Check if we still need to add the last point
peak_loc[c_ind] = temp_loc
peak_mag[c_ind] = temp_mag
c_ind += 1
# Create output
peak_inds = ind[peak_loc[:c_ind]]
peak_mags = peak_mag[:c_ind]
else: # This is a monotone function where an endpoint is the only peak
x_ind = np.argmax(x)
peak_mags = x[x_ind]
if peak_mags > (min_mag + thresh):
peak_inds = ind[x_ind]
else:
peak_mags = []
peak_inds = []
# Change sign of data if was finding minima
if extrema < 0:
peak_mags *= -1.0
# ensure output type array
if not isinstance(peak_inds, np.ndarray):
peak_inds = np.atleast_1d(peak_inds).astype('int64')
if not isinstance(peak_mags, np.ndarray):
peak_mags = np.atleast_1d(peak_mags).astype('float64')
# Plot if no output desired
if len(peak_inds) == 0:
logger.info('No significant peaks found')
else:
logger.info('Found %d significant peak%s'
% (len(peak_inds), _pl(peak_inds)))
return peak_inds, peak_mags
|
import doctest
import sys
import warnings
try:
__file__
except NameError:
__file__ = sys.argv[0]
from logilab.common.testlib import TestCase, unittest_main
from logilab.common import modutils
from os import path, getcwd, sep
from logilab import common
from logilab.common import tree
sys.path.insert(0, path.dirname(__file__))
DATADIR = path.join(path.dirname(__file__), 'data')
class ModutilsTestCase(TestCase):
def setUp(self):
super(ModutilsTestCase, self).setUp()
self.__common_in_path = common.__path__[0] in sys.path
if self.__common_in_path:
sys.path.remove(common.__path__[0])
def tearDown(self):
if self.__common_in_path:
sys.path.insert(0, common.__path__[0])
super(ModutilsTestCase, self).tearDown()
class ModuleFileTC(ModutilsTestCase):
package = "mypypa"
def setUp(self):
super(ModuleFileTC, self).setUp()
for k in list(sys.path_importer_cache.keys()):
if 'MyPyPa' in k:
del sys.path_importer_cache[k]
def test_find_zipped_module(self):
mtype, mfile = modutils._module_file([self.package], [path.join(DATADIR, 'MyPyPa-0.1.0.zip')])
self.assertEqual(mtype, modutils.ZIPFILE)
self.assertEqual(mfile.split(sep)[-4:], ["test", "data", "MyPyPa-0.1.0.zip", self.package])
def test_find_egg_module(self):
mtype, mfile = modutils._module_file([self.package], [path.join(DATADIR, 'MyPyPa-0.1.0-py2.5.egg')])
self.assertEqual(mtype, modutils.ZIPFILE)
self.assertEqual(mfile.split(sep)[-4:], ["test", "data", "MyPyPa-0.1.0-py2.5.egg", self.package])
class load_module_from_name_tc(ModutilsTestCase):
""" load a python module from it's name """
def test_knownValues_load_module_from_name_1(self):
self.assertEqual(modutils.load_module_from_name('sys'), sys)
def test_knownValues_load_module_from_name_2(self):
self.assertEqual(modutils.load_module_from_name('os.path'), path)
def test_raise_load_module_from_name_1(self):
self.assertRaises(ImportError,
modutils.load_module_from_name, 'os.path', use_sys=0)
class get_module_part_tc(ModutilsTestCase):
"""given a dotted name return the module part of the name"""
def test_knownValues_get_module_part_1(self):
self.assertEqual(modutils.get_module_part('logilab.common.modutils'),
'logilab.common.modutils')
def test_knownValues_get_module_part_2(self):
self.assertEqual(modutils.get_module_part('logilab.common.modutils.get_module_part'),
'logilab.common.modutils')
def test_knownValues_get_module_part_3(self):
"""relative import from given file"""
self.assertEqual(modutils.get_module_part('interface.Interface',
modutils.__file__), 'interface')
def test_knownValues_get_compiled_module_part(self):
self.assertEqual(modutils.get_module_part('math.log10'), 'math')
self.assertEqual(modutils.get_module_part('math.log10', __file__), 'math')
def test_knownValues_get_builtin_module_part(self):
self.assertEqual(modutils.get_module_part('sys.path'), 'sys')
self.assertEqual(modutils.get_module_part('sys.path', '__file__'), 'sys')
def test_get_module_part_exception(self):
self.assertRaises(ImportError, modutils.get_module_part, 'unknown.module',
modutils.__file__)
class modpath_from_file_tc(ModutilsTestCase):
""" given an absolute file path return the python module's path as a list """
def test_knownValues_modpath_from_file_1(self):
with warnings.catch_warnings(record=True) as warns:
self.assertEqual(modutils.modpath_from_file(modutils.__file__),
['logilab', 'common', 'modutils'])
self.assertIn('you should avoid using modpath_from_file()',
[str(w.message) for w in warns])
def test_knownValues_modpath_from_file_2(self):
self.assertEqual(modutils.modpath_from_file('unittest_modutils.py',
{getcwd(): 'arbitrary.pkg'}),
['arbitrary', 'pkg', 'unittest_modutils'])
def test_raise_modpath_from_file_Exception(self):
self.assertRaises(Exception, modutils.modpath_from_file, '/turlututu')
class load_module_from_path_tc(ModutilsTestCase):
def test_do_not_load_twice(self):
sys.path.insert(0, self.datadir)
foo = modutils.load_module_from_modpath(['lmfp', 'foo'])
lmfp = modutils.load_module_from_modpath(['lmfp'])
self.assertEqual(len(sys.just_once), 1)
sys.path.pop(0)
del sys.just_once
class file_from_modpath_tc(ModutilsTestCase):
"""given a mod path (i.e. splited module / package name), return the
corresponding file, giving priority to source file over precompiled file
if it exists"""
def test_site_packages(self):
from pytz import tzinfo
self.assertEqual(path.realpath(modutils.file_from_modpath(['pytz', 'tzinfo'])),
path.realpath(tzinfo.__file__.replace('.pyc', '.py')))
def test_std_lib(self):
from os import path
self.assertEqual(path.realpath(modutils.file_from_modpath(['os', 'path']).replace('.pyc', '.py')),
path.realpath(path.__file__.replace('.pyc', '.py')))
def test_xmlplus(self):
try:
# don't fail if pyxml isn't installed
from xml.dom import ext
except ImportError:
pass
else:
self.assertEqual(path.realpath(modutils.file_from_modpath(['xml', 'dom', 'ext']).replace('.pyc', '.py')),
path.realpath(ext.__file__.replace('.pyc', '.py')))
def test_builtin(self):
self.assertEqual(modutils.file_from_modpath(['sys']),
None)
def test_unexisting(self):
self.assertRaises(ImportError, modutils.file_from_modpath, ['turlututu'])
class get_source_file_tc(ModutilsTestCase):
def test(self):
from os import path
self.assertEqual(modutils.get_source_file(path.__file__),
path.__file__.replace('.pyc', '.py'))
def test_raise(self):
self.assertRaises(modutils.NoSourceFile, modutils.get_source_file, 'whatever')
class is_standard_module_tc(ModutilsTestCase):
"""
return true if the module may be considered as a module from the standard
library
"""
def test_builtins(self):
if sys.version_info < (3, 0):
self.assertEqual(modutils.is_standard_module('__builtin__'), True)
self.assertEqual(modutils.is_standard_module('builtins'), False)
else:
self.assertEqual(modutils.is_standard_module('__builtin__'), False)
self.assertEqual(modutils.is_standard_module('builtins'), True)
def test_builtin(self):
self.assertEqual(modutils.is_standard_module('sys'), True)
def test_nonstandard(self):
self.assertEqual(modutils.is_standard_module('logilab'), False)
def test_unknown(self):
self.assertEqual(modutils.is_standard_module('unknown'), False)
def test_4(self):
self.assertEqual(modutils.is_standard_module('marshal'), True)
self.assertEqual(modutils.is_standard_module('pickle'), True)
self.assertEqual(modutils.is_standard_module('email'), True)
self.assertEqual(modutils.is_standard_module('StringIO'), sys.version_info < (3, 0))
venv_py3 = sys.version_info[0] >= 3 and hasattr(sys, 'real_prefix')
if not venv_py3:
# those modules are symlinked by virtualenv (but not by python's venv)
self.assertEqual(modutils.is_standard_module('hashlib'), True)
self.assertEqual(modutils.is_standard_module('io'), True)
def test_custom_path(self):
self.assertEqual(modutils.is_standard_module('data.module', (DATADIR,)), True)
self.assertEqual(modutils.is_standard_module('data.module', (path.abspath(DATADIR),)), True)
def test_failing_border_cases(self):
# using a subpackage/submodule path as std_path argument
self.assertEqual(modutils.is_standard_module('logilab.common', common.__path__), False)
# using a module + object name as modname argument
self.assertEqual(modutils.is_standard_module('sys.path'), True)
# this is because only the first package/module is considered
self.assertEqual(modutils.is_standard_module('sys.whatever'), True)
self.assertEqual(modutils.is_standard_module('logilab.whatever', common.__path__), False)
class is_relative_tc(ModutilsTestCase):
def test_knownValues_is_relative_1(self):
self.assertEqual(modutils.is_relative('modutils', common.__path__[0]), True)
def test_knownValues_is_relative_2(self):
self.assertEqual(modutils.is_relative('modutils', tree.__file__), True)
def test_knownValues_is_relative_3(self):
self.assertEqual(modutils.is_relative('logilab.common.modutils',
common.__path__[0]), False)
class get_modules_tc(ModutilsTestCase):
def test_knownValues_get_modules_1(self): # XXXFIXME: TOWRITE
"""given a directory return a list of all available python modules, even
in subdirectories
"""
import data.find_test as data
mod_path = ("data", 'find_test')
modules = sorted(modutils.get_modules(path.join(*mod_path),
data.__path__[0]))
self.assertSetEqual(set(modules),
set([ '.'.join(mod_path + (mod, )) for mod in ('module', 'module2',
'noendingnewline', 'nonregr')]))
class get_modules_files_tc(ModutilsTestCase):
def test_knownValues_get_module_files_1(self): # XXXFIXME: TOWRITE
"""given a directory return a list of all available python module's files, even
in subdirectories
"""
import data
modules = sorted(modutils.get_module_files(path.join(DATADIR, 'find_test'),
data.__path__[0]))
self.assertEqual(modules,
[path.join(DATADIR, 'find_test', x) for x in ['__init__.py', 'module.py', 'module2.py', 'noendingnewline.py', 'nonregr.py']])
def test_load_module_set_attribute(self):
import logilab.common.fileutils
import logilab
del logilab.common.fileutils
del sys.modules['logilab.common.fileutils']
m = modutils.load_module_from_modpath(['logilab', 'common', 'fileutils'])
self.assertTrue( hasattr(logilab, 'common') )
self.assertTrue( hasattr(logilab.common, 'fileutils') )
self.assertTrue( m is logilab.common.fileutils )
def load_tests(loader, tests, ignore):
from logilab.common import modutils
tests.addTests(doctest.DocTestSuite(modutils))
return tests
if __name__ == '__main__':
unittest_main()
|
from homeassistant.components.air_quality import (
ATTR_ATTRIBUTION,
ATTR_N2O,
ATTR_OZONE,
ATTR_PM_10,
)
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
)
from homeassistant.setup import async_setup_component
async def test_state(hass):
"""Test Air Quality state."""
config = {"air_quality": {"platform": "demo"}}
assert await async_setup_component(hass, "air_quality", config)
await hass.async_block_till_done()
state = hass.states.get("air_quality.demo_air_quality_home")
assert state is not None
assert state.state == "14"
async def test_attributes(hass):
"""Test Air Quality attributes."""
config = {"air_quality": {"platform": "demo"}}
assert await async_setup_component(hass, "air_quality", config)
await hass.async_block_till_done()
state = hass.states.get("air_quality.demo_air_quality_office")
assert state is not None
data = state.attributes
assert data.get(ATTR_PM_10) == 16
assert data.get(ATTR_N2O) is None
assert data.get(ATTR_OZONE) is None
assert data.get(ATTR_ATTRIBUTION) == "Powered by Home Assistant"
assert (
data.get(ATTR_UNIT_OF_MEASUREMENT) == CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
|
import contextlib
import socket
import xml.etree.ElementTree as ET
from http import client
from radicale import app, httputils
from radicale import item as radicale_item
from radicale import storage, xmlutils
from radicale.log import logger
def xml_proppatch(base_prefix, path, xml_request, collection):
"""Read and answer PROPPATCH requests.
Read rfc4918-9.2 for info.
"""
multistatus = ET.Element(xmlutils.make_clark("D:multistatus"))
response = ET.Element(xmlutils.make_clark("D:response"))
multistatus.append(response)
href = ET.Element(xmlutils.make_clark("D:href"))
href.text = xmlutils.make_href(base_prefix, path)
response.append(href)
# Create D:propstat element for props with status 200 OK
propstat = ET.Element(xmlutils.make_clark("D:propstat"))
status = ET.Element(xmlutils.make_clark("D:status"))
status.text = xmlutils.make_response(200)
props_ok = ET.Element(xmlutils.make_clark("D:prop"))
propstat.append(props_ok)
propstat.append(status)
response.append(propstat)
new_props = collection.get_meta()
for short_name, value in xmlutils.props_from_request(xml_request).items():
if value is None:
with contextlib.suppress(KeyError):
del new_props[short_name]
else:
new_props[short_name] = value
props_ok.append(ET.Element(xmlutils.make_clark(short_name)))
radicale_item.check_and_sanitize_props(new_props)
collection.set_meta(new_props)
return multistatus
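# Hedged illustration (example body, paraphrasing RFC 4918 section 9.2): a
# PROPPATCH request that sets one property and removes another looks roughly
# like
#
#   <D:propertyupdate xmlns:D="DAV:" xmlns:C="urn:ietf:params:xml:ns:caldav">
#     <D:set><D:prop><C:calendar-description>Work</C:calendar-description></D:prop></D:set>
#     <D:remove><D:prop><C:calendar-timezone/></D:prop></D:remove>
#   </D:propertyupdate>
#
# props_from_request() flattens set/remove into a {name: value} mapping where
# removed properties map to None, which matches the deletion branch above.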
class ApplicationProppatchMixin:
def do_PROPPATCH(self, environ, base_prefix, path, user):
"""Manage PROPPATCH request."""
access = app.Access(self._rights, user, path)
if not access.check("w"):
return httputils.NOT_ALLOWED
try:
xml_content = self._read_xml_request_body(environ)
except RuntimeError as e:
logger.warning(
"Bad PROPPATCH request on %r: %s", path, e, exc_info=True)
return httputils.BAD_REQUEST
except socket.timeout:
logger.debug("Client timed out", exc_info=True)
return httputils.REQUEST_TIMEOUT
with self._storage.acquire_lock("w", user):
item = next(self._storage.discover(path), None)
if not item:
return httputils.NOT_FOUND
if not access.check("w", item):
return httputils.NOT_ALLOWED
if not isinstance(item, storage.BaseCollection):
return httputils.FORBIDDEN
headers = {"DAV": httputils.DAV_HEADERS,
"Content-Type": "text/xml; charset=%s" % self._encoding}
try:
xml_answer = xml_proppatch(base_prefix, path, xml_content,
item)
except ValueError as e:
logger.warning(
"Bad PROPPATCH request on %r: %s", path, e, exc_info=True)
return httputils.BAD_REQUEST
return client.MULTI_STATUS, headers, self._xml_response(xml_answer)
|
import logging
from datetime import datetime, timedelta
from typing import Callable, List, Optional, Set, Union
import discord
from redbot.core import checks, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import humanize_number
from redbot.core.utils.mod import slow_deletion, mass_purge
from redbot.core.utils.predicates import MessagePredicate
from .checks import check_self_permissions
from .converters import PositiveInt, RawMessageIds, positive_int
_ = Translator("Cleanup", __file__)
log = logging.getLogger("red.cleanup")
@cog_i18n(_)
class Cleanup(commands.Cog):
"""This cog contains commands used for "cleaning up" (deleting) messages.
This is designed as a moderator tool and offers many convenient use cases.
All cleanup commands only apply to the channel the command is executed in.
Messages older than two weeks cannot be mass deleted.
This is a limitation of the API.
"""
def __init__(self, bot: Red):
super().__init__()
self.bot = bot
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
@staticmethod
async def check_100_plus(ctx: commands.Context, number: int) -> bool:
"""
Called when trying to delete more than 100 messages at once.
Prompts the user to choose whether they want to continue or not.
Tries its best to cleanup after itself if the response is positive.
"""
if ctx.assume_yes:
return True
prompt = await ctx.send(
_("Are you sure you want to delete {number} messages? (y/n)").format(
number=humanize_number(number)
)
)
response = await ctx.bot.wait_for("message", check=MessagePredicate.same_context(ctx))
if response.content.lower().startswith("y"):
await prompt.delete()
try:
await response.delete()
except discord.HTTPException:
pass
return True
else:
await ctx.send(_("Cancelled."))
return False
@staticmethod
async def get_messages_for_deletion(
*,
channel: discord.TextChannel,
number: Optional[PositiveInt] = None,
check: Callable[[discord.Message], bool] = lambda x: True,
limit: Optional[PositiveInt] = None,
before: Union[discord.Message, datetime] = None,
after: Union[discord.Message, datetime] = None,
delete_pinned: bool = False,
) -> List[discord.Message]:
"""
Gets a list of messages meeting the requirements to be deleted.
Generally, the requirements are:
- We don't have the number of messages to be deleted already
- The message passes a provided check (if no check is provided,
this is automatically true)
- The message is less than 14 days old
- The message is not pinned
Warning: Due to the way the API hands messages back in chunks,
passing after and a number together is not advisable.
If you need to accomplish this, you should filter messages on
the entire applicable range, rather than use this utility.
"""
# This isn't actually two weeks ago to allow some wiggle room on API limits
two_weeks_ago = datetime.utcnow() - timedelta(days=14, minutes=-5)
def message_filter(message):
return (
check(message)
and message.created_at > two_weeks_ago
and (delete_pinned or not message.pinned)
)
if after:
if isinstance(after, discord.Message):
after = after.created_at
after = max(after, two_weeks_ago)
collected = []
async for message in channel.history(
limit=limit, before=before, after=after, oldest_first=False
):
if message.created_at < two_weeks_ago:
break
if message_filter(message):
collected.append(message)
if number is not None and number <= len(collected):
break
return collected
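# Hedged usage sketch (parameters illustrative): collecting up to ten recent
# non-pinned messages from the invoking author could look like
#
#   to_delete = await Cleanup.get_messages_for_deletion(
#       channel=ctx.channel,
#       number=10,
#       check=lambda m: m.author == ctx.author,
#       before=ctx.message,
#   )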
@commands.group()
async def cleanup(self, ctx: commands.Context):
"""Base command for deleting messages."""
pass
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def text(
self, ctx: commands.Context, text: str, number: positive_int, delete_pinned: bool = False
):
"""Delete the last X messages matching the specified text.
Example:
- `[p]cleanup text "test" 5`
Remember to use double quotes.
**Arguments:**
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.author
if number > 100:
cont = await self.check_100_plus(ctx, number)
if not cont:
return
def check(m):
if text in m.content:
return True
else:
return False
to_delete = await self.get_messages_for_deletion(
channel=channel,
number=number,
check=check,
before=ctx.message,
delete_pinned=delete_pinned,
)
to_delete.append(ctx.message)
reason = "{}({}) deleted {} messages containing '{}' in channel {}.".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
text,
channel.id,
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def user(
self, ctx: commands.Context, user: str, number: positive_int, delete_pinned: bool = False
):
"""Delete the last X messages from a specified user.
Examples:
- `[p]cleanup user @Twentysix 2`
- `[p]cleanup user Red 6`
**Arguments:**
- `<user>` The user whose messages are to be cleaned up.
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
member = None
try:
member = await commands.MemberConverter().convert(ctx, user)
except commands.BadArgument:
try:
_id = int(user)
except ValueError:
raise commands.BadArgument()
else:
_id = member.id
author = ctx.author
if number > 100:
cont = await self.check_100_plus(ctx, number)
if not cont:
return
def check(m):
if m.author.id == _id:
return True
else:
return False
to_delete = await self.get_messages_for_deletion(
channel=channel,
number=number,
check=check,
before=ctx.message,
delete_pinned=delete_pinned,
)
to_delete.append(ctx.message)
reason = (
"{}({}) deleted {} messages "
" made by {}({}) in channel {}."
"".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
member or "???",
_id,
channel.name,
)
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def after(
self, ctx: commands.Context, message_id: RawMessageIds, delete_pinned: bool = False
):
"""Delete all messages after a specified message.
To get a message id, enable developer mode in Discord's
settings, 'appearance' tab. Then right click a message
and copy its id.
**Arguments:**
- `<message_id>` The id of the message to cleanup after. This message won't be deleted.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.author
try:
after = await channel.fetch_message(message_id)
except discord.NotFound:
return await ctx.send(_("Message not found."))
to_delete = await self.get_messages_for_deletion(
channel=channel, number=None, after=after, delete_pinned=delete_pinned
)
reason = "{}({}) deleted {} messages in channel {}.".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
channel.name,
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def before(
self,
ctx: commands.Context,
message_id: RawMessageIds,
number: positive_int,
delete_pinned: bool = False,
):
"""Deletes X messages before the specified message.
To get a message id, enable developer mode in Discord's
settings, 'appearance' tab. Then right click a message
and copy its id.
**Arguments:**
- `<message_id>` The id of the message to cleanup before. This message won't be deleted.
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.author
try:
before = await channel.fetch_message(message_id)
except discord.NotFound:
return await ctx.send(_("Message not found."))
to_delete = await self.get_messages_for_deletion(
channel=channel, number=number, before=before, delete_pinned=delete_pinned
)
to_delete.append(ctx.message)
reason = "{}({}) deleted {} messages in channel {}.".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
channel.name,
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def between(
self,
ctx: commands.Context,
one: RawMessageIds,
two: RawMessageIds,
delete_pinned: bool = False,
):
"""Delete the messages between Message One and Message Two, providing the messages IDs.
The first message ID should be the older message and the second one the newer.
Example:
- `[p]cleanup between 123456789123456789 987654321987654321`
**Arguments:**
- `<one>` The id of the message to cleanup after. This message won't be deleted.
- `<two>` The id of the message to cleanup before. This message won't be deleted.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.author
try:
mone = await channel.fetch_message(one)
except discord.errors.NotFound:
return await ctx.send(
_("Could not find a message with the ID of {id}.".format(id=one))
)
try:
mtwo = await channel.fetch_message(two)
except discord.errors.NotFound:
return await ctx.send(
_("Could not find a message with the ID of {id}.".format(id=two))
)
to_delete = await self.get_messages_for_deletion(
channel=channel, before=mtwo, after=mone, delete_pinned=delete_pinned
)
to_delete.append(ctx.message)
reason = "{}({}) deleted {} messages in channel {}.".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
channel.name,
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def messages(
self, ctx: commands.Context, number: positive_int, delete_pinned: bool = False
):
"""Delete the last X messages.
Example:
- `[p]cleanup messages 26`
**Arguments:**
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.author
if number > 100:
cont = await self.check_100_plus(ctx, number)
if not cont:
return
to_delete = await self.get_messages_for_deletion(
channel=channel, number=number, before=ctx.message, delete_pinned=delete_pinned
)
to_delete.append(ctx.message)
reason = "{}({}) deleted {} messages in channel {}.".format(
author.name, author.id, len(to_delete), channel.name
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command(name="bot")
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def cleanup_bot(
self, ctx: commands.Context, number: positive_int, delete_pinned: bool = False
):
"""Clean up command messages and messages from the bot.
Can only cleanup custom commands and alias commands if those cogs are loaded.
**Arguments:**
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.message.author
if number > 100:
cont = await self.check_100_plus(ctx, number)
if not cont:
return
prefixes = await self.bot.get_prefix(ctx.message) # This returns all server prefixes
if isinstance(prefixes, str):
prefixes = [prefixes]
# In case some idiot sets a null prefix
if "" in prefixes:
prefixes.remove("")
cc_cog = self.bot.get_cog("CustomCommands")
if cc_cog is not None:
command_names: Set[str] = await cc_cog.get_command_names(ctx.guild)
is_cc = lambda name: name in command_names
else:
is_cc = lambda name: False
alias_cog = self.bot.get_cog("Alias")
if alias_cog is not None:
alias_names: Set[str] = set(
a.name for a in await alias_cog._aliases.get_global_aliases()
) | set(a.name for a in await alias_cog._aliases.get_guild_aliases(ctx.guild))
is_alias = lambda name: name in alias_names
else:
is_alias = lambda name: False
bot_id = self.bot.user.id
def check(m):
if m.author.id == bot_id:
return True
elif m == ctx.message:
return True
p = discord.utils.find(m.content.startswith, prefixes)
if p and len(p) > 0:
cmd_name = m.content[len(p) :].split(" ")[0]
return (
bool(self.bot.get_command(cmd_name)) or is_alias(cmd_name) or is_cc(cmd_name)
)
return False
to_delete = await self.get_messages_for_deletion(
channel=channel,
number=number,
check=check,
before=ctx.message,
delete_pinned=delete_pinned,
)
to_delete.append(ctx.message)
reason = (
"{}({}) deleted {} "
" command messages in channel {}."
"".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
channel.name,
)
)
log.info(reason)
await mass_purge(to_delete, channel)
@cleanup.command(name="self")
@check_self_permissions()
async def cleanup_self(
self,
ctx: commands.Context,
number: positive_int,
match_pattern: str = None,
delete_pinned: bool = False,
):
"""Clean up messages owned by the bot.
By default, all messages are cleaned. If a second argument is specified,
it is used for pattern matching - only messages containing the given text will be deleted.
Examples:
- `[p]cleanup self 6`
- `[p]cleanup self 10 Pong`
- `[p]cleanup self 7 "" True`
**Arguments:**
- `<number>` The max number of messages to cleanup. Must be a positive integer.
- `<match_pattern>` The text that messages must contain to be deleted. Use "" to skip this.
- `<delete_pinned>` Whether to delete pinned messages or not. Defaults to False
"""
channel = ctx.channel
author = ctx.message.author
if number > 100:
cont = await self.check_100_plus(ctx, number)
if not cont:
return
# You can always delete your own messages; manage_messages is only needed for a mass purge
can_mass_purge = False
if type(author) is discord.Member:
me = ctx.guild.me
can_mass_purge = channel.permissions_for(me).manage_messages
if match_pattern:
def content_match(c):
return match_pattern in c
else:
def content_match(_):
return True
def check(m):
if m.author.id != self.bot.user.id:
return False
elif content_match(m.content):
return True
return False
to_delete = await self.get_messages_for_deletion(
channel=channel,
number=number,
check=check,
before=ctx.message,
delete_pinned=delete_pinned,
)
if can_mass_purge:
to_delete.append(ctx.message)
if ctx.guild:
channel_name = "channel " + channel.name
else:
channel_name = str(channel)
reason = (
"{}({}) deleted {} messages "
"sent by the bot in {}."
"".format(
author.name,
author.id,
humanize_number(len(to_delete), override_locale="en_US"),
channel_name,
)
)
log.info(reason)
if can_mass_purge:
await mass_purge(to_delete, channel)
else:
await slow_deletion(to_delete)
@cleanup.command(name="spam")
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(manage_messages=True)
async def cleanup_spam(self, ctx: commands.Context, number: positive_int = PositiveInt(50)):
"""Deletes duplicate messages in the channel from the last X messages and keeps only one copy.
Defaults to 50.
**Arguments:**
- `<number>` The number of messages to check for duplicates. Must be a positive integer.
"""
msgs = []
spam = []
def check(m):
if m.attachments:
return False
c = (m.author.id, m.content, [e.to_dict() for e in m.embeds])
if c in msgs:
spam.append(m)
return True
else:
msgs.append(c)
return False
to_delete = await self.get_messages_for_deletion(
channel=ctx.channel, limit=number, check=check, before=ctx.message
)
if len(to_delete) > 100:
cont = await self.check_100_plus(ctx, len(to_delete))
if not cont:
return
log.info(
"%s (%s) deleted %s spam messages in channel %s (%s).",
ctx.author,
ctx.author.id,
len(to_delete),
ctx.channel,
ctx.channel.id,
)
to_delete.append(ctx.message)
await mass_purge(to_delete, ctx.channel)
|
import pytest
import voluptuous as vol
from homeassistant.components.sql.sensor import validate_sql_select
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
async def test_query(hass):
"""Test the SQL sensor."""
config = {
"sensor": {
"platform": "sql",
"db_url": "sqlite://",
"queries": [
{
"name": "count_tables",
"query": "SELECT 5 as value",
"column": "value",
}
],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.count_tables")
assert state.state == "5"
assert state.attributes["value"] == 5
async def test_invalid_query(hass):
"""Test the SQL sensor for invalid queries."""
with pytest.raises(vol.Invalid):
validate_sql_select("DROP TABLE *")
config = {
"sensor": {
"platform": "sql",
"db_url": "sqlite://",
"queries": [
{
"name": "count_tables",
"query": "SELECT * value FROM sqlite_master;",
"column": "value",
}
],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.count_tables")
assert state.state == STATE_UNKNOWN
|
import os.path
from nikola.plugin_categories import ShortcodePlugin
class ThumbnailShortcode(ShortcodePlugin):
"""Plugin for thumbnail directive."""
name = "thumbnail"
def handler(self, uri, alt=None, align=None, linktitle=None, title=None, imgclass=None, figclass=None, site=None, data=None, lang=None, post=None):
"""Create HTML for thumbnail."""
if uri.endswith('.svg'):
            # The trailing '?' makes docutils emit an <img> instead of an <object> for the SVG, which lightboxes may require
src = '.thumbnail'.join(os.path.splitext(uri)) + '?'
else:
src = '.thumbnail'.join(os.path.splitext(uri))
if imgclass is None:
imgclass = ''
if figclass is None:
figclass = ''
if align and data:
figclass += ' align-{0}'.format(align)
elif align:
imgclass += ' align-{0}'.format(align)
output = '<a href="{0}" class="image-reference"'.format(uri)
if linktitle:
output += ' title="{0}"'.format(linktitle)
output += '><img src="{0}"'.format(src)
for item, name in ((alt, 'alt'), (title, 'title'), (imgclass, 'class')):
if item:
output += ' {0}="{1}"'.format(name, item)
output += '></a>'
if data:
output = '<div class="figure {0}">{1}{2}</div>'.format(figclass, output, data)
return output, []
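# Hedged illustration (not part of the original plugin): for a call like
# handler('images/photo.png', alt='A photo', align='left'), the generated markup
# would look roughly like
#   <a href="images/photo.png" class="image-reference">
#   <img src="images/photo.thumbnail.png" alt="A photo" class=" align-left"></a>
# while an SVG input such as 'images/logo.svg' gets a trailing '?' appended to
# its thumbnail src so an <img> tag is produced rather than an <object>.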
|
from homeassistant.helpers.entity import Entity
from .const import (
ATTR_API_DATA_FIELD,
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_LABEL,
ATTR_UNIT,
DOMAIN,
ROUTER_DEFAULT_MODEL,
ROUTER_DEFAULT_NAME,
ROUTER_MANUFACTURER,
SENSOR_TYPES,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add Vilfo Router entities from a config_entry."""
vilfo = hass.data[DOMAIN][config_entry.entry_id]
sensors = []
for sensor_type in SENSOR_TYPES:
sensors.append(VilfoRouterSensor(sensor_type, vilfo))
async_add_entities(sensors, True)
class VilfoRouterSensor(Entity):
"""Define a Vilfo Router Sensor."""
def __init__(self, sensor_type, api):
"""Initialize."""
self.api = api
self.sensor_type = sensor_type
self._device_info = {
"identifiers": {(DOMAIN, api.host, api.mac_address)},
"name": ROUTER_DEFAULT_NAME,
"manufacturer": ROUTER_MANUFACTURER,
"model": ROUTER_DEFAULT_MODEL,
"sw_version": api.firmware_version,
}
self._unique_id = f"{self.api.unique_id}_{self.sensor_type}"
self._state = None
@property
def available(self):
"""Return whether the sensor is available or not."""
return self.api.available
@property
def device_info(self):
"""Return the device info."""
return self._device_info
@property
def device_class(self):
"""Return the device class."""
return SENSOR_TYPES[self.sensor_type].get(ATTR_DEVICE_CLASS)
@property
def icon(self):
"""Return the icon for the sensor."""
return SENSOR_TYPES[self.sensor_type][ATTR_ICON]
@property
def name(self):
"""Return the name of the sensor."""
parent_device_name = self._device_info["name"]
sensor_name = SENSOR_TYPES[self.sensor_type][ATTR_LABEL]
return f"{parent_device_name} {sensor_name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return SENSOR_TYPES[self.sensor_type].get(ATTR_UNIT)
async def async_update(self):
"""Update the router data."""
await self.api.async_update()
self._state = self.api.data.get(
SENSOR_TYPES[self.sensor_type][ATTR_API_DATA_FIELD]
)
|
from unittest import TestCase
from httpobs.scanner.analyzer.utils import is_hpkp_preloaded, is_hsts_preloaded
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# this domain is manually pinned
result = is_hpkp_preloaded('aus4.mozilla.org')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
        self.assertEqual('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('foo.bugzilla.mozilla.org')
        self.assertEqual('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
# this domain is manually pinned
result = is_hsts_preloaded('aus4.mozilla.org')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
|
from __future__ import absolute_import, print_function, unicode_literals
import sys
import threading
import cursor
from halo import Halo
from halo._utils import (colored_frame, decode_utf_8_text)
class HaloNotebook(Halo):
def __init__(self, text='', color='cyan', text_color=None, spinner=None, placement='left',
animation=None, interval=-1, enabled=True, stream=sys.stdout):
super(HaloNotebook, self).__init__(text=text,
color=color,
text_color=text_color,
spinner=spinner,
placement=placement,
animation=animation,
interval=interval, enabled=enabled,
stream=stream)
self.output = self._make_output_widget()
def _make_output_widget(self):
from ipywidgets.widgets import Output
return Output()
    # TODO: use a property and setter for output
def _output(self, text=''):
return ({'name': 'stdout', 'output_type': 'stream', 'text': text},)
def clear(self):
if not self.enabled:
return self
with self.output:
self.output.outputs += self._output('\r')
self.output.outputs += self._output(self.CLEAR_LINE)
self.output.outputs = self._output()
return self
def _render_frame(self):
frame = self.frame()
output = '\r{}'.format(frame)
with self.output:
self.output.outputs += self._output(output)
def start(self, text=None):
if text is not None:
self.text = text
if not self.enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide()
self.output = self._make_output_widget()
from IPython.display import display
display(self.output)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop_and_persist(self, symbol=' ', text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self.enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text['original']
text = text.strip()
if self._text_color:
text = colored_frame(text, self._text_color)
self.stop()
output = '\r{} {}\n'.format(*[
(text, symbol)
if self._placement == 'right' else
(symbol, text)
][0])
with self.output:
self.output.outputs = self._output(output)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import re
import sys
import threading
import time
import traceback
import unittest
from xml.sax import saxutils
from absl.testing import _pretty_print_reporter
from absl.third_party import unittest3_backport
import six
# See http://www.w3.org/TR/REC-xml/#NT-Char
_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD}
_control_character_conversions = {
chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes}
_escape_xml_attr_conversions = {
    '"': '&quot;',
    "'": '&apos;',
    '\n': '&#xA;',
    '\t': '&#x9;',
    '\r': '&#xD;',
    ' ': '&#x20;'}
_escape_xml_attr_conversions.update(_control_character_conversions)
# When class or module level function fails, unittest/suite.py adds a
# _ErrorHolder instance instead of a real TestCase, and it has a description
# like "setUpClass (__main__.MyTestCase)".
_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$')
# NOTE: while saxutils.quoteattr() theoretically does the same thing, it
# often ends up being too smart for its own good and does not escape properly.
# This function is much more reliable.
def _escape_xml_attr(content):
"""Escapes xml attributes."""
# Note: saxutils doesn't escape the quotes.
return saxutils.escape(content, _escape_xml_attr_conversions)
def _escape_cdata(s):
"""Escapes a string to be used as XML CDATA.
CDATA characters are treated strictly as character data, not as XML markup,
but there are still certain restrictions on them.
Args:
s: the string to be escaped.
Returns:
An escaped version of the input string.
"""
for char, escaped in six.iteritems(_control_character_conversions):
s = s.replace(char, escaped)
return s.replace(']]>', ']] >')
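# Hedged example (not in the original module): disallowed control characters are
# replaced by an escaped textual form and the CDATA terminator is split, e.g.
# _escape_cdata('a\x00b]]>c') yields the text a\x00b]] >c (literal backslash escape).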
def _iso8601_timestamp(timestamp):
"""Produces an ISO8601 datetime.
Args:
timestamp: an Epoch based timestamp in seconds.
Returns:
    An ISO8601-format timestamp if the input is a valid timestamp, None otherwise.
"""
if timestamp is None or timestamp < 0:
return None
# Use utcfromtimestamp in PY2 because it doesn't have a built-in UTC object
if six.PY2:
return '%s+00:00' % datetime.datetime.utcfromtimestamp(
timestamp).isoformat()
else:
return datetime.datetime.fromtimestamp(
timestamp, tz=datetime.timezone.utc).isoformat()
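# Hedged example (not in the original module): on Python 3, _iso8601_timestamp(0)
# returns '1970-01-01T00:00:00+00:00', while None or a negative value yields None.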
def _print_xml_element_header(element, attributes, stream, indentation=''):
"""Prints an XML header of an arbitrary element.
Args:
element: element name (testsuites, testsuite, testcase)
attributes: 2-tuple list with (attributes, values) already escaped
stream: output stream to write test report XML to
indentation: indentation added to the element header
"""
stream.write('%s<%s' % (indentation, element))
for attribute in attributes:
if len(attribute) == 2 \
and attribute[0] is not None and attribute[1] is not None:
stream.write(' %s="%s"' % (attribute[0], attribute[1]))
stream.write('>\n')
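# Hedged example (not in the original module): calling
#   _print_xml_element_header('testsuite', [('name', 'MyTests'), ('tests', '2')], sys.stdout, '  ')
# writes '  <testsuite name="MyTests" tests="2">\n' to the stream; attribute
# pairs containing a None entry are skipped.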
# Copy time.time which ensures the real time is used internally.
# This prevents bad interactions with tests that stub out time.
_time_copy = time.time
if hasattr(traceback, '_some_str'):
# Use the traceback module str function to format safely.
_safe_str = traceback._some_str
else:
_safe_str = str # pylint: disable=invalid-name
class _TestCaseResult(object):
"""Private helper for _TextAndXMLTestResult that represents a test result.
Attributes:
test: A TestCase instance of an individual test method.
name: The name of the individual test method.
full_class_name: The full name of the test class.
run_time: The duration (in seconds) it took to run the test.
start_time: Epoch relative timestamp of when test started (in seconds)
errors: A list of error 4-tuples. Error tuple entries are
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: A string explaining why the test was skipped.
"""
def __init__(self, test):
self.run_time = -1
self.start_time = -1
self.skip_reason = None
self.errors = []
self.test = test
# Parse the test id to get its test name and full class path.
    # Unfortunately there is no better way of knowing the test and class.
    # Worse, unittest uses _ErrorHolder instances to represent class / module
    # level failures.
test_desc = test.id() or str(test)
# Check if it's something like "setUpClass (__main__.TestCase)".
match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
if match:
name = match.group(1)
full_class_name = match.group(2)
else:
class_name = unittest.util.strclass(test.__class__)
if ((six.PY3 and isinstance(test, unittest.case._SubTest)) or
(six.PY2 and isinstance(test, unittest3_backport.case._SubTest))):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
class_name = unittest.util.strclass(test.test_case.__class__)
if test_desc.startswith(class_name + '.'):
# In a typical unittest.TestCase scenario, test.id() returns with
# a class name formatted using unittest.util.strclass.
name = test_desc[len(class_name)+1:]
full_class_name = class_name
else:
# Otherwise make a best effort to guess the test name and full class
# path.
parts = test_desc.rsplit('.', 1)
name = parts[-1]
full_class_name = parts[0] if len(parts) == 2 else ''
self.name = _escape_xml_attr(name)
self.full_class_name = _escape_xml_attr(full_class_name)
def set_run_time(self, time_in_secs):
self.run_time = time_in_secs
def set_start_time(self, time_in_secs):
self.start_time = time_in_secs
def print_xml_summary(self, stream):
"""Prints an XML Summary of a TestCase.
Status and result are populated as per JUnit XML test result reporter.
A test that has been skipped will always have a skip reason,
as every skip method in Python's unittest requires the reason arg to be
passed.
Args:
stream: output stream to write test report XML to
"""
if self.skip_reason is None:
status = 'run'
result = 'completed'
else:
status = 'notrun'
result = 'suppressed'
test_case_attributes = [
('name', '%s' % self.name),
('status', '%s' % status),
('result', '%s' % result),
('time', '%.1f' % self.run_time),
('classname', self.full_class_name),
('timestamp', _iso8601_timestamp(self.start_time)),
]
_print_xml_element_header('testcase', test_case_attributes, stream, ' ')
self._print_testcase_details(stream)
stream.write(' </testcase>\n')
def _print_testcase_details(self, stream):
for error in self.errors:
outcome, exception_type, message, error_msg = error # pylint: disable=unpacking-non-sequence
message = _escape_xml_attr(_safe_str(message))
exception_type = _escape_xml_attr(str(exception_type))
error_msg = _escape_cdata(error_msg)
stream.write(' <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
% (outcome, message, exception_type, error_msg, outcome))
class _TestSuiteResult(object):
"""Private helper for _TextAndXMLTestResult."""
def __init__(self):
self.suites = {}
self.failure_counts = {}
self.error_counts = {}
self.overall_start_time = -1
self.overall_end_time = -1
self._testsuites_properties = {}
def add_test_case_result(self, test_case_result):
suite_name = type(test_case_result.test).__name__
if suite_name == '_ErrorHolder':
# _ErrorHolder is a special case created by unittest for class / module
# level functions.
suite_name = test_case_result.full_class_name.rsplit('.')[-1]
if ((six.PY3 and
isinstance(test_case_result.test, unittest.case._SubTest)) or
(six.PY2 and
isinstance(test_case_result.test, unittest3_backport.case._SubTest))):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
suite_name = type(test_case_result.test.test_case).__name__
self._setup_test_suite(suite_name)
self.suites[suite_name].append(test_case_result)
for error in test_case_result.errors:
# Only count the first failure or error so that the sum is equal to the
# total number of *testcases* that have failures or errors.
if error[0] == 'failure':
self.failure_counts[suite_name] += 1
break
elif error[0] == 'error':
self.error_counts[suite_name] += 1
break
def print_xml_summary(self, stream):
overall_test_count = sum(len(x) for x in self.suites.values())
overall_failures = sum(self.failure_counts.values())
overall_errors = sum(self.error_counts.values())
overall_attributes = [
('name', ''),
('tests', '%d' % overall_test_count),
('failures', '%d' % overall_failures),
('errors', '%d' % overall_errors),
('time', '%.1f' % (self.overall_end_time - self.overall_start_time)),
('timestamp', _iso8601_timestamp(self.overall_start_time)),
]
_print_xml_element_header('testsuites', overall_attributes, stream)
if self._testsuites_properties:
stream.write(' <properties>\n')
for name, value in sorted(six.iteritems(self._testsuites_properties)):
stream.write(' <property name="%s" value="%s"></property>\n' %
(_escape_xml_attr(name), _escape_xml_attr(str(value))))
stream.write(' </properties>\n')
for suite_name in self.suites:
suite = self.suites[suite_name]
suite_end_time = max(x.start_time + x.run_time for x in suite)
suite_start_time = min(x.start_time for x in suite)
failures = self.failure_counts[suite_name]
errors = self.error_counts[suite_name]
suite_attributes = [
('name', '%s' % suite_name),
('tests', '%d' % len(suite)),
('failures', '%d' % failures),
('errors', '%d' % errors),
('time', '%.1f' % (suite_end_time - suite_start_time)),
('timestamp', _iso8601_timestamp(suite_start_time)),
]
_print_xml_element_header('testsuite', suite_attributes, stream)
for test_case_result in suite:
test_case_result.print_xml_summary(stream)
stream.write('</testsuite>\n')
stream.write('</testsuites>\n')
def _setup_test_suite(self, suite_name):
"""Adds a test suite to the set of suites tracked by this test run.
Args:
suite_name: string, The name of the test suite being initialized.
"""
if suite_name in self.suites:
return
self.suites[suite_name] = []
self.failure_counts[suite_name] = 0
self.error_counts[suite_name] = 0
def set_end_time(self, timestamp_in_secs):
"""Sets the start timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_end_time = timestamp_in_secs
def set_start_time(self, timestamp_in_secs):
"""Sets the end timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_start_time = timestamp_in_secs
class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
"""Private TestResult class that produces both formatted text results and XML.
Used by TextAndXMLTestRunner.
"""
_TEST_SUITE_RESULT_CLASS = _TestSuiteResult
_TEST_CASE_RESULT_CLASS = _TestCaseResult
def __init__(self, xml_stream, stream, descriptions, verbosity,
time_getter=_time_copy, testsuites_properties=None):
super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
self.xml_stream = xml_stream
self.pending_test_case_results = {}
self.suite = self._TEST_SUITE_RESULT_CLASS()
if testsuites_properties:
self.suite._testsuites_properties = testsuites_properties
self.time_getter = time_getter
# This lock guards any mutations on pending_test_case_results.
self._pending_test_case_results_lock = threading.RLock()
def startTest(self, test):
self.start_time = self.time_getter()
super(_TextAndXMLTestResult, self).startTest(test)
def stopTest(self, test):
# Grabbing the write lock to avoid conflicting with stopTestRun.
with self._pending_test_case_results_lock:
super(_TextAndXMLTestResult, self).stopTest(test)
result = self.get_pending_test_case_result(test)
if not result:
test_name = test.id() or str(test)
sys.stderr.write('No pending test case: %s\n' % test_name)
return
test_id = id(test)
run_time = self.time_getter() - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
del self.pending_test_case_results[test_id]
def startTestRun(self):
self.suite.set_start_time(self.time_getter())
super(_TextAndXMLTestResult, self).startTestRun()
def stopTestRun(self):
self.suite.set_end_time(self.time_getter())
    # All pending_test_case_results will be added to the suite and removed from
    # the pending_test_case_results dictionary. Grab the write lock so that no
    # results are added during this process, which would duplicate adds or
    # accidentally erase newly appended pending results.
with self._pending_test_case_results_lock:
# Errors in the test fixture (setUpModule, tearDownModule,
# setUpClass, tearDownClass) can leave a pending result which
# never gets added to the suite. The runner calls stopTestRun
# which gives us an opportunity to add these errors for
# reporting here.
for test_id in self.pending_test_case_results:
result = self.pending_test_case_results[test_id]
if hasattr(self, 'start_time'):
run_time = self.suite.overall_end_time - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
self.pending_test_case_results.clear()
def _exc_info_to_string(self, err, test=None):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method must be overridden because the method signature in
unittest.TestResult changed between Python 2.2 and 2.4.
Args:
err: A sys.exc_info() tuple of values for an error.
test: The test method.
Returns:
A formatted exception string.
"""
if test:
return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test)
return ''.join(traceback.format_exception(*err))
def add_pending_test_case_result(self, test, error_summary=None,
skip_reason=None):
"""Adds result information to a test case result which may still be running.
If a result entry for the test already exists, add_pending_test_case_result
will add error summary tuples and/or overwrite skip_reason for the result.
If it does not yet exist, a result entry will be created.
Note that a test result is considered to have been run and passed
only if there are no errors or skip_reason.
Args:
test: A test method as defined by unittest
error_summary: A 4-tuple with the following entries:
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: a string explaining why the test was skipped
"""
with self._pending_test_case_results_lock:
test_id = id(test)
if test_id not in self.pending_test_case_results:
self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
test)
if error_summary:
self.pending_test_case_results[test_id].errors.append(error_summary)
if skip_reason:
self.pending_test_case_results[test_id].skip_reason = skip_reason
def delete_pending_test_case_result(self, test):
with self._pending_test_case_results_lock:
test_id = id(test)
del self.pending_test_case_results[test_id]
def get_pending_test_case_result(self, test):
test_id = id(test)
return self.pending_test_case_results.get(test_id, None)
def addSuccess(self, test):
super(_TextAndXMLTestResult, self).addSuccess(test)
self.add_pending_test_case_result(test)
def addError(self, test, err):
super(_TextAndXMLTestResult, self).addError(test, err)
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addFailure(self, test, err):
super(_TextAndXMLTestResult, self).addFailure(test, err)
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSkip(self, test, reason):
super(_TextAndXMLTestResult, self).addSkip(test, reason)
self.add_pending_test_case_result(test, skip_reason=reason)
def addExpectedFailure(self, test, err):
super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
if callable(getattr(test, 'recordProperty', None)):
test.recordProperty('EXPECTED_FAILURE',
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test)
def addUnexpectedSuccess(self, test):
super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
test_name = test.id() or str(test)
error_summary = ('error', '', '',
'Test case %s should have failed, but passed.'
% (test_name))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name
super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err)
if err is not None:
if issubclass(err[0], test.failureException):
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = None
self.add_pending_test_case_result(subtest, error_summary=error_summary)
def printErrors(self):
super(_TextAndXMLTestResult, self).printErrors()
self.xml_stream.write('<?xml version="1.0"?>\n')
self.suite.print_xml_summary(self.xml_stream)
class TextAndXMLTestRunner(unittest.TextTestRunner):
"""A test runner that produces both formatted text results and XML.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
_TEST_RESULT_CLASS = _TextAndXMLTestResult
_xml_stream = None
_testsuites_properties = {}
def __init__(self, xml_stream=None, *args, **kwargs):
"""Initialize a TextAndXMLTestRunner.
Args:
xml_stream: file-like or None; XML-formatted test results are output
via this object's write() method. If None (the default), the
new instance behaves as described in the set_default_xml_stream method
documentation below.
*args: passed unmodified to unittest.TextTestRunner.__init__.
**kwargs: passed unmodified to unittest.TextTestRunner.__init__.
"""
super(TextAndXMLTestRunner, self).__init__(*args, **kwargs)
if xml_stream is not None:
self._xml_stream = xml_stream
# else, do not set self._xml_stream to None -- this allows implicit fallback
# to the class attribute's value.
@classmethod
def set_default_xml_stream(cls, xml_stream):
"""Sets the default XML stream for the class.
Args:
xml_stream: file-like or None; used for instances when xml_stream is None
or not passed to their constructors. If None is passed, instances
created with xml_stream=None will act as ordinary TextTestRunner
instances; this is the default state before any calls to this method
have been made.
"""
cls._xml_stream = xml_stream
def _makeResult(self):
if self._xml_stream is None:
return super(TextAndXMLTestRunner, self)._makeResult()
else:
return self._TEST_RESULT_CLASS(
self._xml_stream, self.stream, self.descriptions, self.verbosity,
testsuites_properties=self._testsuites_properties)
@classmethod
def set_testsuites_property(cls, key, value):
cls._testsuites_properties[key] = value
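# Hedged usage sketch (not part of the original module): the runner can write the
# XML report to any file-like object while still printing text results, e.g.
#   with open('report.xml', 'w') as xml_out:
#       runner = TextAndXMLTestRunner(xml_stream=xml_out, verbosity=2)
#       runner.run(unittest.defaultTestLoader.loadTestsFromModule(some_module))
# where 'some_module' is a placeholder for a module containing TestCase classes.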
|
import copy
import datetime
import logging
import re
import voluptuous as vol
from homeassistant.components import mqtt
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.const import (
CONF_CODE,
CONF_DELAY_TIME,
CONF_DISARM_AFTER_TRIGGER,
CONF_NAME,
CONF_PENDING_TIME,
CONF_PLATFORM,
CONF_TRIGGER_TIME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
track_point_in_time,
)
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_CODE_TEMPLATE = "code_template"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_PAYLOAD_DISARM = "payload_disarm"
CONF_PAYLOAD_ARM_HOME = "payload_arm_home"
CONF_PAYLOAD_ARM_AWAY = "payload_arm_away"
CONF_PAYLOAD_ARM_NIGHT = "payload_arm_night"
DEFAULT_ALARM_NAME = "HA Alarm"
DEFAULT_DELAY_TIME = datetime.timedelta(seconds=0)
DEFAULT_PENDING_TIME = datetime.timedelta(seconds=60)
DEFAULT_TRIGGER_TIME = datetime.timedelta(seconds=120)
DEFAULT_DISARM_AFTER_TRIGGER = False
DEFAULT_ARM_AWAY = "ARM_AWAY"
DEFAULT_ARM_HOME = "ARM_HOME"
DEFAULT_ARM_NIGHT = "ARM_NIGHT"
DEFAULT_DISARM = "DISARM"
SUPPORTED_STATES = [
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_TRIGGERED,
]
SUPPORTED_PRETRIGGER_STATES = [
state for state in SUPPORTED_STATES if state != STATE_ALARM_TRIGGERED
]
SUPPORTED_PENDING_STATES = [
state for state in SUPPORTED_STATES if state != STATE_ALARM_DISARMED
]
ATTR_PRE_PENDING_STATE = "pre_pending_state"
ATTR_POST_PENDING_STATE = "post_pending_state"
def _state_validator(config):
"""Validate the state."""
config = copy.deepcopy(config)
for state in SUPPORTED_PRETRIGGER_STATES:
if CONF_DELAY_TIME not in config[state]:
config[state][CONF_DELAY_TIME] = config[CONF_DELAY_TIME]
if CONF_TRIGGER_TIME not in config[state]:
config[state][CONF_TRIGGER_TIME] = config[CONF_TRIGGER_TIME]
for state in SUPPORTED_PENDING_STATES:
if CONF_PENDING_TIME not in config[state]:
config[state][CONF_PENDING_TIME] = config[CONF_PENDING_TIME]
return config
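# Hedged illustration (not part of the original component): given a top-level
# pending_time of 60 seconds and an armed_away section that omits it, the
# validator copies the top-level value, so after validation
#   config[STATE_ALARM_ARMED_AWAY][CONF_PENDING_TIME] == config[CONF_PENDING_TIME]
# while per-state values, when present, are left untouched.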
def _state_schema(state):
"""Validate the state."""
schema = {}
if state in SUPPORTED_PRETRIGGER_STATES:
schema[vol.Optional(CONF_DELAY_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
schema[vol.Optional(CONF_TRIGGER_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
if state in SUPPORTED_PENDING_STATES:
schema[vol.Optional(CONF_PENDING_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
return vol.Schema(schema)
PLATFORM_SCHEMA = vol.Schema(
vol.All(
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): "manual_mqtt",
vol.Optional(CONF_NAME, default=DEFAULT_ALARM_NAME): cv.string,
vol.Exclusive(CONF_CODE, "code validation"): cv.string,
vol.Exclusive(CONF_CODE_TEMPLATE, "code validation"): cv.template,
vol.Optional(CONF_DELAY_TIME, default=DEFAULT_DELAY_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_PENDING_TIME, default=DEFAULT_PENDING_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_TRIGGER_TIME, default=DEFAULT_TRIGGER_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(
CONF_DISARM_AFTER_TRIGGER, default=DEFAULT_DISARM_AFTER_TRIGGER
): cv.boolean,
vol.Optional(STATE_ALARM_ARMED_AWAY, default={}): _state_schema(
STATE_ALARM_ARMED_AWAY
),
vol.Optional(STATE_ALARM_ARMED_HOME, default={}): _state_schema(
STATE_ALARM_ARMED_HOME
),
vol.Optional(STATE_ALARM_ARMED_NIGHT, default={}): _state_schema(
STATE_ALARM_ARMED_NIGHT
),
vol.Optional(STATE_ALARM_DISARMED, default={}): _state_schema(
STATE_ALARM_DISARMED
),
vol.Optional(STATE_ALARM_TRIGGERED, default={}): _state_schema(
STATE_ALARM_TRIGGERED
),
vol.Required(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Required(mqtt.CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
vol.Optional(
CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY
): cv.string,
vol.Optional(
CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME
): cv.string,
vol.Optional(
CONF_PAYLOAD_ARM_NIGHT, default=DEFAULT_ARM_NIGHT
): cv.string,
vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
}
),
_state_validator,
)
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the manual MQTT alarm platform."""
add_entities(
[
ManualMQTTAlarm(
hass,
config[CONF_NAME],
config.get(CONF_CODE),
config.get(CONF_CODE_TEMPLATE),
config.get(CONF_DISARM_AFTER_TRIGGER, DEFAULT_DISARM_AFTER_TRIGGER),
config.get(mqtt.CONF_STATE_TOPIC),
config.get(mqtt.CONF_COMMAND_TOPIC),
config.get(mqtt.CONF_QOS),
config.get(CONF_CODE_ARM_REQUIRED),
config.get(CONF_PAYLOAD_DISARM),
config.get(CONF_PAYLOAD_ARM_HOME),
config.get(CONF_PAYLOAD_ARM_AWAY),
config.get(CONF_PAYLOAD_ARM_NIGHT),
config,
)
]
)
class ManualMQTTAlarm(alarm.AlarmControlPanelEntity):
"""
Representation of an alarm status.
    When armed, the panel is pending for 'pending_time' and then becomes armed.
    When triggered, it is pending for the triggering state's 'delay_time'
    plus the triggered state's 'pending_time'. After that it is triggered for
    'trigger_time', and then returns to the previous state, or disarms if
    `disarm_after_trigger` is true.
    A trigger_time of zero disables the alarm_trigger service.
"""
def __init__(
self,
hass,
name,
code,
code_template,
disarm_after_trigger,
state_topic,
command_topic,
qos,
code_arm_required,
payload_disarm,
payload_arm_home,
payload_arm_away,
payload_arm_night,
config,
):
"""Init the manual MQTT alarm panel."""
self._state = STATE_ALARM_DISARMED
self._hass = hass
self._name = name
if code_template:
self._code = code_template
self._code.hass = hass
else:
self._code = code or None
self._disarm_after_trigger = disarm_after_trigger
self._previous_state = self._state
self._state_ts = None
self._delay_time_by_state = {
state: config[state][CONF_DELAY_TIME]
for state in SUPPORTED_PRETRIGGER_STATES
}
self._trigger_time_by_state = {
state: config[state][CONF_TRIGGER_TIME]
for state in SUPPORTED_PRETRIGGER_STATES
}
self._pending_time_by_state = {
state: config[state][CONF_PENDING_TIME]
for state in SUPPORTED_PENDING_STATES
}
self._state_topic = state_topic
self._command_topic = command_topic
self._qos = qos
self._code_arm_required = code_arm_required
self._payload_disarm = payload_disarm
self._payload_arm_home = payload_arm_home
self._payload_arm_away = payload_arm_away
self._payload_arm_night = payload_arm_night
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._state == STATE_ALARM_TRIGGERED:
if self._within_pending_time(self._state):
return STATE_ALARM_PENDING
trigger_time = self._trigger_time_by_state[self._previous_state]
if (
self._state_ts + self._pending_time(self._state) + trigger_time
) < dt_util.utcnow():
if self._disarm_after_trigger:
return STATE_ALARM_DISARMED
self._state = self._previous_state
return self._state
if self._state in SUPPORTED_PENDING_STATES and self._within_pending_time(
self._state
):
return STATE_ALARM_PENDING
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return (
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER
)
@property
def _active_state(self):
"""Get the current state."""
if self.state == STATE_ALARM_PENDING:
return self._previous_state
return self._state
def _pending_time(self, state):
"""Get the pending time."""
pending_time = self._pending_time_by_state[state]
if state == STATE_ALARM_TRIGGERED:
pending_time += self._delay_time_by_state[self._previous_state]
return pending_time
def _within_pending_time(self, state):
"""Get if the action is in the pending time window."""
return self._state_ts + self._pending_time(state) > dt_util.utcnow()
@property
def code_format(self):
"""Return one or more digits/characters."""
if self._code is None:
return None
if isinstance(self._code, str) and re.search("^\\d+$", self._code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
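    # Hedged example (not part of the original component): a static code of
    # "1234" yields FORMAT_NUMBER, "abc1" yields FORMAT_TEXT, and a template
    # code always falls through to FORMAT_TEXT because it is not a str.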
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return self._code_arm_required
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, STATE_ALARM_DISARMED):
return
self._state = STATE_ALARM_DISARMED
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_HOME
):
return
self._update_state(STATE_ALARM_ARMED_HOME)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_AWAY
):
return
self._update_state(STATE_ALARM_ARMED_AWAY)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_NIGHT
):
return
self._update_state(STATE_ALARM_ARMED_NIGHT)
def alarm_trigger(self, code=None):
"""
Send alarm trigger command.
        No code needed; a trigger time of zero for the current state
        disables the alarm.
"""
if not self._trigger_time_by_state[self._active_state]:
return
self._update_state(STATE_ALARM_TRIGGERED)
def _update_state(self, state):
"""Update the state."""
if self._state == state:
return
self._previous_state = self._state
self._state = state
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
pending_time = self._pending_time(state)
if state == STATE_ALARM_TRIGGERED:
track_point_in_time(
self._hass, self.async_update_ha_state, self._state_ts + pending_time
)
trigger_time = self._trigger_time_by_state[self._previous_state]
track_point_in_time(
self._hass,
self.async_update_ha_state,
self._state_ts + pending_time + trigger_time,
)
elif state in SUPPORTED_PENDING_STATES and pending_time:
track_point_in_time(
self._hass, self.async_update_ha_state, self._state_ts + pending_time
)
def _validate_code(self, code, state):
"""Validate given code."""
if self._code is None:
return True
if isinstance(self._code, str):
alarm_code = self._code
else:
alarm_code = self._code.render(
from_state=self._state, to_state=state, parse_result=False
)
check = not alarm_code or code == alarm_code
if not check:
_LOGGER.warning("Invalid code given for %s", state)
return check
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.state != STATE_ALARM_PENDING:
return {}
return {
ATTR_PRE_PENDING_STATE: self._previous_state,
ATTR_POST_PENDING_STATE: self._state,
}
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
async_track_state_change_event(
self.hass, [self.entity_id], self._async_state_changed_listener
)
async def message_received(msg):
"""Run when new MQTT message has been received."""
if msg.payload == self._payload_disarm:
await self.async_alarm_disarm(self._code)
elif msg.payload == self._payload_arm_home:
await self.async_alarm_arm_home(self._code)
elif msg.payload == self._payload_arm_away:
await self.async_alarm_arm_away(self._code)
elif msg.payload == self._payload_arm_night:
await self.async_alarm_arm_night(self._code)
else:
_LOGGER.warning("Received unexpected payload: %s", msg.payload)
return
await mqtt.async_subscribe(
self.hass, self._command_topic, message_received, self._qos
)
async def _async_state_changed_listener(self, event):
"""Publish state change to MQTT."""
new_state = event.data.get("new_state")
if new_state is None:
return
mqtt.async_publish(
self.hass, self._state_topic, new_state.state, self._qos, True
)
|
from datetime import timedelta
from homeassistant.components.flo.const import DOMAIN as FLO_DOMAIN
from homeassistant.components.flo.device import FloDeviceDataUpdateCoordinator
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from .common import TEST_PASSWORD, TEST_USER_ID
from tests.common import async_fire_time_changed
async def test_device(hass, config_entry, aioclient_mock_fixture, aioclient_mock):
"""Test Flo by Moen device."""
config_entry.add_to_hass(hass)
assert await async_setup_component(
hass, FLO_DOMAIN, {CONF_USERNAME: TEST_USER_ID, CONF_PASSWORD: TEST_PASSWORD}
)
await hass.async_block_till_done()
assert len(hass.data[FLO_DOMAIN][config_entry.entry_id]["devices"]) == 1
device: FloDeviceDataUpdateCoordinator = hass.data[FLO_DOMAIN][
config_entry.entry_id
]["devices"][0]
assert device.api_client is not None
assert device.available
assert device.consumption_today == 3.674
assert device.current_flow_rate == 0
assert device.current_psi == 54.20000076293945
assert device.current_system_mode == "home"
assert device.target_system_mode == "home"
assert device.firmware_version == "6.1.1"
assert device.device_type == "flo_device_v2"
assert device.id == "98765"
assert device.last_heard_from_time == "2020-07-24T12:45:00Z"
assert device.location_id == "mmnnoopp"
assert device.hass is not None
assert device.temperature == 70
assert device.mac_address == "111111111111"
assert device.model == "flo_device_075_v2"
assert device.manufacturer == "Flo by Moen"
assert device.device_name == "Flo by Moen flo_device_075_v2"
assert device.rssi == -47
assert device.pending_info_alerts_count == 0
assert device.pending_critical_alerts_count == 0
assert device.pending_warning_alerts_count == 2
assert device.has_alerts is True
assert device.last_known_valve_state == "open"
assert device.target_valve_state == "open"
call_count = aioclient_mock.call_count
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=90))
await hass.async_block_till_done()
assert aioclient_mock.call_count == call_count + 2
|
from datetime import timedelta
import logging
from typing import Any, Dict, Optional
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONF_ATTRIBUTE, CONF_FOR, CONF_PLATFORM, MATCH_ALL
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, State, callback
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.event import (
Event,
async_track_same_state,
async_track_state_change_event,
process_state_match,
)
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
CONF_ENTITY_ID = "entity_id"
CONF_FROM = "from"
CONF_TO = "to"
BASE_SCHEMA = {
vol.Required(CONF_PLATFORM): "state",
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_FOR): cv.positive_time_period_template,
vol.Optional(CONF_ATTRIBUTE): cv.match_all,
}
TRIGGER_STATE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
# These are str on purpose. Want to catch YAML conversions
vol.Optional(CONF_FROM): vol.Any(str, [str]),
vol.Optional(CONF_TO): vol.Any(str, [str]),
}
)
TRIGGER_ATTRIBUTE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
vol.Optional(CONF_FROM): cv.match_all,
vol.Optional(CONF_TO): cv.match_all,
}
)
def TRIGGER_SCHEMA(value: Any) -> dict: # pylint: disable=invalid-name
"""Validate trigger."""
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
# We use this approach instead of vol.Any because
# this gives better error messages.
if CONF_ATTRIBUTE in value:
return TRIGGER_ATTRIBUTE_SCHEMA(value)
return TRIGGER_STATE_SCHEMA(value)
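# Hedged illustration (not part of the original module): a config such as
#   {"platform": "state", "entity_id": "light.kitchen", "to": "on"}
# is validated by TRIGGER_STATE_SCHEMA, while adding "attribute": "brightness"
# routes it through TRIGGER_ATTRIBUTE_SCHEMA instead.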
async def async_attach_trigger(
hass: HomeAssistant,
config,
action,
automation_info,
*,
platform_type: str = "state",
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
from_state = config.get(CONF_FROM, MATCH_ALL)
to_state = config.get(CONF_TO, MATCH_ALL)
time_delta = config.get(CONF_FOR)
template.attach(hass, time_delta)
match_all = from_state == MATCH_ALL and to_state == MATCH_ALL
unsub_track_same = {}
period: Dict[str, timedelta] = {}
match_from_state = process_state_match(from_state)
match_to_state = process_state_match(to_state)
attribute = config.get(CONF_ATTRIBUTE)
job = HassJob(action)
@callback
def state_automation_listener(event: Event):
"""Listen for state changes and calls action."""
entity: str = event.data["entity_id"]
from_s: Optional[State] = event.data.get("old_state")
to_s: Optional[State] = event.data.get("new_state")
if from_s is None:
old_value = None
elif attribute is None:
old_value = from_s.state
else:
old_value = from_s.attributes.get(attribute)
if to_s is None:
new_value = None
elif attribute is None:
new_value = to_s.state
else:
new_value = to_s.attributes.get(attribute)
# When we listen for state changes with `match_all`, we
# will trigger even if just an attribute changes. When
# we listen to just an attribute, we should ignore all
# other attribute changes.
if attribute is not None and old_value == new_value:
return
if (
not match_from_state(old_value)
or not match_to_state(new_value)
or (not match_all and old_value == new_value)
):
return
@callback
def call_action():
"""Call action with right context."""
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": platform_type,
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
"for": time_delta if not time_delta else period[entity],
"attribute": attribute,
"description": f"state of {entity}",
}
},
event.context,
)
if not time_delta:
call_action()
return
variables = {
"trigger": {
"platform": "state",
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
}
}
try:
period[entity] = cv.positive_time_period(
template.render_complex(time_delta, variables)
)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error(
"Error rendering '%s' for template: %s", automation_info["name"], ex
)
return
def _check_same_state(_, _2, new_st: State):
if new_st is None:
return False
if attribute is None:
cur_value = new_st.state
else:
cur_value = new_st.attributes.get(attribute)
if CONF_FROM in config and CONF_TO not in config:
return cur_value != old_value
return cur_value == new_value
unsub_track_same[entity] = async_track_same_state(
hass,
period[entity],
call_action,
_check_same_state,
entity_ids=entity,
)
unsub = async_track_state_change_event(hass, entity_id, state_automation_listener)
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
for async_remove in unsub_track_same.values():
async_remove()
unsub_track_same.clear()
return async_remove
|
from data.module import YO, YOUPI
import data
class Specialization(YOUPI, YO): pass
class Metaclass(type): pass
class Interface: pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class MyException(Exception): pass
class MyError(MyException): pass
class AbstractClass(object):
def to_override(self, whatever):
raise NotImplementedError()
def return_something(self, param):
if param:
return 'toto'
return
class Concrete0:
__implements__ = MyIFace
class Concrete1:
__implements__ = MyIFace, AnotherIFace
class Concrete2:
__implements__ = (MyIFace,
AnotherIFace)
class Concrete23(Concrete1): pass
del YO.member
del YO
[SYN1, SYN2] = Concrete0, Concrete1
assert '1'
b = 1 | 2 & 3 ^ 8
exec('c = 3')
exec('c = 3', {}, {})
def raise_string(a=2, *args, **kwargs):
raise 'pas glop'
raise Exception('yo')
yield 'coucou'
a = b + 2
c = b * 2
c = b / 2
c = b // 2
c = b - 2
c = b % 2
c = b ** 2
c = b << 2
c = b >> 2
c = ~b
c = not b
d = [c]
e = d[:]
e = d[a:b:c]
raise_string(*args, **kwargs)
print >> stream, 'bonjour'
print >> stream, 'salut',
def make_class(any, base=data.module.YO, *args, **kwargs):
"""check base is correctly resolved to Concrete0"""
class Aaaa(base):
"""dynamic class"""
return Aaaa
|
import gzip
import io
import sys
import boto3
def gzip_compress(data):
#
# gzip.compress does not exist under Py2
#
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as fout:
fout.write(data)
return buf.getvalue()
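# Hedged example (not part of the original script): the returned bytes can be
# round-tripped with the standard library on Python 3, e.g.
#   gzip.decompress(gzip_compress(b'hello')) == b'hello'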
def _build_contents():
hello_bytes = u"hello wořld\nhow are you?".encode('utf8')
yield 'hello.txt', hello_bytes
yield 'multiline.txt', b'englishman\nin\nnew\nyork\n'
yield 'hello.txt.gz', gzip_compress(hello_bytes)
for i in range(100):
key = 'iter_bucket/%02d.txt' % i
body = '\n'.join("line%i%i" % (i, line_no) for line_no in range(10)).encode('utf8')
yield key, body
CONTENTS = dict(_build_contents())
def main():
bucket_name = sys.argv[1]
bucket = boto3.resource('s3').Bucket(bucket_name)
#
# Assume the bucket exists. Creating it ourselves and dealing with
# timing issues is too much of a PITA.
#
for key in bucket.objects.all():
key.delete()
for (key, body) in CONTENTS.items():
bucket.put_object(Key=key, Body=body)
if __name__ == '__main__':
main()
|
from homeassistant.components import switch
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers import discovery
from homeassistant.setup import async_setup_component
from .common import NUMATO_CFG, mockup_raise
MOCKUP_ENTITY_IDS = {
"switch.numato_switch_mock_port5",
"switch.numato_switch_mock_port6",
}
async def test_failing_setups_no_entities(hass, numato_fixture, monkeypatch):
"""When port setup fails, no entity shall be created."""
monkeypatch.setattr(numato_fixture.NumatoDeviceMock, "setup", mockup_raise)
assert await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done()
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id not in hass.states.async_entity_ids()
async def test_regular_hass_operations(hass, numato_fixture):
"""Test regular operations from within Home Assistant."""
assert await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done() # wait until services are registered
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port5"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port5").state == "on"
assert numato_fixture.devices[0].values[5] == 1
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port6"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port6").state == "on"
assert numato_fixture.devices[0].values[6] == 1
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port5"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port5").state == "off"
assert numato_fixture.devices[0].values[5] == 0
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port6"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port6").state == "off"
assert numato_fixture.devices[0].values[6] == 0
async def test_failing_hass_operations(hass, numato_fixture, monkeypatch):
"""Test failing operations called from within Home Assistant.
Switches remain in their initial 'off' state when the device can't
be written to.
"""
assert await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done() # wait until services are registered
monkeypatch.setattr(numato_fixture.devices[0], "write", mockup_raise)
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port5"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port5").state == "off"
assert not numato_fixture.devices[0].values[5]
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port6"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port6").state == "off"
assert not numato_fixture.devices[0].values[6]
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port5"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port5").state == "off"
assert not numato_fixture.devices[0].values[5]
await hass.services.async_call(
switch.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.numato_switch_mock_port6"},
blocking=True,
)
assert hass.states.get("switch.numato_switch_mock_port6").state == "off"
assert not numato_fixture.devices[0].values[6]
async def test_switch_setup_without_discovery_info(hass, config, numato_fixture):
"""Test handling of empty discovery_info."""
numato_fixture.discover()
await discovery.async_load_platform(hass, "switch", "numato", None, config)
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id not in hass.states.async_entity_ids()
await hass.async_block_till_done() # wait for numato platform to be loaded
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id in hass.states.async_entity_ids()
|
from hass_nabucasa import Cloud
from hass_nabucasa.voice import VoiceError
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from .const import DOMAIN
CONF_GENDER = "gender"
SUPPORT_LANGUAGES = ["en-US", "de-DE", "es-ES"]
SUPPORT_GENDER = ["male", "female"]
DEFAULT_LANG = "en-US"
DEFAULT_GENDER = "female"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(SUPPORT_GENDER),
}
)
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Cloud speech component."""
cloud: Cloud = hass.data[DOMAIN]
if discovery_info is not None:
language = DEFAULT_LANG
gender = DEFAULT_GENDER
else:
language = config[CONF_LANG]
gender = config[CONF_GENDER]
return CloudProvider(cloud, language, gender)
class CloudProvider(Provider):
"""NabuCasa Cloud speech API provider."""
def __init__(self, cloud: Cloud, language: str, gender: str):
"""Initialize cloud provider."""
self.cloud = cloud
self.name = "Cloud"
self._language = language
self._gender = gender
@property
def default_language(self):
"""Return the default language."""
return self._language
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""Return list of supported options like voice, emotion."""
return [CONF_GENDER]
@property
def default_options(self):
"""Return a dict include default options."""
return {CONF_GENDER: self._gender}
async def async_get_tts_audio(self, message, language, options=None):
"""Load TTS from NabuCasa Cloud."""
# Process TTS
try:
data = await self.cloud.voice.process_tts(
message, language, gender=options[CONF_GENDER]
)
except VoiceError:
return (None, None)
return ("mp3", data)
|
from django.contrib.sitemaps import Sitemap
from django.db.models import Count
from django.db.models import Max
from django.urls import reverse
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.settings import PROTOCOL
class ZinniaSitemap(Sitemap):
"""
Base Sitemap class for Zinnia.
"""
protocol = PROTOCOL
class EntrySitemap(ZinniaSitemap):
"""
Sitemap for entries.
"""
priority = 0.5
changefreq = 'weekly'
def items(self):
"""
Return published entries.
"""
return Entry.published.all()
def lastmod(self, obj):
"""
Return last modification of an entry.
"""
return obj.last_update
class EntryRelatedSitemap(ZinniaSitemap):
"""
Sitemap for models related to Entries.
"""
model = None
changefreq = 'monthly'
def items(self):
"""
        Get a queryset, cache info for standardized access to it later,
        then compute the maximum number of entries to define the priority
        of each item.
"""
queryset = self.get_queryset()
self.cache_infos(queryset)
self.set_max_entries()
return queryset
def get_queryset(self):
"""
Build a queryset of items with published entries and annotated
with the number of entries and the latest modification date.
"""
return self.model.published.annotate(
count_entries_published=Count('entries')).annotate(
last_update=Max('entries__last_update')).order_by(
'-count_entries_published', '-last_update', '-pk')
def cache_infos(self, queryset):
"""
        Cache info such as the number of entries published and
        the last modification date for standardized access later.
"""
self.cache = {}
for item in queryset:
self.cache[item.pk] = (item.count_entries_published,
item.last_update)
def set_max_entries(self):
"""
        Define the maximum number of entries for computing the priority
        of each item later.
"""
if self.cache:
self.max_entries = float(max([i[0] for i in self.cache.values()]))
def lastmod(self, item):
"""
The last modification date is defined
        by the latest entry's last update in the cache.
"""
return self.cache[item.pk][1]
def priority(self, item):
"""
        The priority of the item depends on the number of entries published
        in the cache divided by the maximum number of entries.
"""
return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)
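    # Hedged example (not part of the original class): with max_entries == 10,
    # an item carrying 3 published entries gets priority '0.3', while an item
    # with a single published entry still gets the 0.1 floor ('0.1').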
class CategorySitemap(EntryRelatedSitemap):
"""
Sitemap for categories.
"""
model = Category
class AuthorSitemap(EntryRelatedSitemap):
"""
Sitemap for authors.
"""
model = Author
class TagSitemap(EntryRelatedSitemap):
"""
Sitemap for tags.
"""
def get_queryset(self):
"""
        Return the published Tags annotated with their usage counts.
"""
self.entries_qs = Entry.published.all()
return Tag.objects.usage_for_queryset(
self.entries_qs, counts=True)
def cache_infos(self, queryset):
"""
Cache the number of entries published and the last
modification date under each tag.
"""
self.cache = {}
for item in queryset:
            # If the sitemap is too slow, don't hesitate to do this:
# self.cache[item.pk] = (item.count, None)
self.cache[item.pk] = (
item.count, TaggedItem.objects.get_by_model(
self.entries_qs, item)[0].last_update)
def location(self, item):
"""
Return URL of the tag.
"""
return reverse('zinnia:tag_detail', args=[item.name])
|
from unittest import TestCase
from django.http.request import HttpRequest
from weblate.utils.request import get_ip_address, get_user_agent
class RequestTest(TestCase):
def test_get_ip(self):
request = HttpRequest()
request.META["REMOTE_ADDR"] = "1.2.3.4"
self.assertEqual(get_ip_address(request), "1.2.3.4")
def test_agent(self):
request = HttpRequest()
request.META["HTTP_USER_AGENT"] = "agent"
self.assertEqual(get_user_agent(request), "Other / Other / Other")
def test_agent_long(self):
request = HttpRequest()
request.META["HTTP_USER_AGENT"] = "agent " * 200
self.assertLess(len(get_user_agent(request)), 200)
|
import json
import logging
import os
import re
from absl import flags
from perfkitbenchmarker import container_service
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import kubernetes_helper
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
FLAGS.kubernetes_anti_affinity = False
NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET = 'nvidia_unrestricted_permissions_daemonset.yml'
DEFAULT_CONTAINER_VERSION = 'latest'
SERVICE_ACCOUNT_PATTERN = r'.*((?<!iam)|{project}.iam).gserviceaccount.com'
class GoogleContainerRegistry(container_service.BaseContainerRegistry):
"""Class for building and storing container images on GCP."""
CLOUD = gcp.CLOUD
def __init__(self, registry_spec):
super(GoogleContainerRegistry, self).__init__(registry_spec)
self.project = self.project or util.GetDefaultProject()
def GetFullRegistryTag(self, image):
"""Gets the full tag of the image."""
region = util.GetMultiRegionFromRegion(util.GetRegionFromZone(self.zone))
hostname = '{region}.gcr.io'.format(region=region)
full_tag = '{hostname}/{project}/{name}'.format(
hostname=hostname, project=self.project, name=image)
return full_tag
def Login(self):
"""No-op because Push() handles its own auth."""
pass
def Push(self, image):
"""Push a locally built image to the registry."""
full_tag = self.GetFullRegistryTag(image.name)
tag_cmd = ['docker', 'tag', image.name, full_tag]
vm_util.IssueCommand(tag_cmd)
# vm_util.IssueCommand() is used here instead of util.GcloudCommand()
# because gcloud flags cannot be appended to the command since they
# are interpreted as docker args instead.
push_cmd = [
FLAGS.gcloud_path, '--project', self.project,
'docker', '--', 'push', full_tag
]
vm_util.IssueCommand(push_cmd)
def RemoteBuild(self, image):
"""Build the image remotely."""
full_tag = self.GetFullRegistryTag(image.name)
build_cmd = util.GcloudCommand(self, 'builds', 'submit',
'--tag', full_tag, image.directory)
del build_cmd.flags['zone']
build_cmd.Issue()
class GkeCluster(container_service.KubernetesCluster):
"""Class representing a Google Kubernetes Engine cluster."""
CLOUD = gcp.CLOUD
def __init__(self, spec):
super(GkeCluster, self).__init__(spec)
self.project = spec.vm_spec.project
self.cluster_version = (FLAGS.container_cluster_version or
DEFAULT_CONTAINER_VERSION)
self.use_application_default_credentials = True
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the cluster.
Returns:
dict mapping string property key to value.
"""
result = super(GkeCluster, self).GetResourceMetadata()
result['project'] = self.project
result['container_cluster_version'] = self.cluster_version
result['boot_disk_type'] = self.vm_config.boot_disk_type
result['boot_disk_size'] = self.vm_config.boot_disk_size
if self.vm_config.max_local_disks:
result['gce_local_ssd_count'] = self.vm_config.max_local_disks
# TODO(pclay): support NVME when it leaves alpha
# Also consider moving FLAGS.gce_ssd_interface into the vm_spec.
result['gce_local_ssd_interface'] = gce_virtual_machine.SCSI
return result
def _Create(self):
"""Creates the cluster."""
cmd = util.GcloudCommand(self, 'container', 'clusters', 'create', self.name)
cmd.flags['cluster-version'] = self.cluster_version
if FLAGS.gke_enable_alpha:
cmd.args.append('--enable-kubernetes-alpha')
cmd.args.append('--no-enable-autorepair')
cmd.args.append('--no-enable-autoupgrade')
user = util.GetDefaultUser()
if FLAGS.gcp_service_account:
cmd.flags['service-account'] = FLAGS.gcp_service_account
    # Matches service accounts that either definitely belong to this project or
    # are GCP-managed service accounts (like the GCE default service account)
    # whose owning project we cannot determine.
elif re.match(SERVICE_ACCOUNT_PATTERN, user):
logging.info(
'Re-using configured service-account for GKE Cluster: %s', user)
cmd.flags['service-account'] = user
self.use_application_default_credentials = False
else:
logging.info('Using default GCE service account for GKE cluster')
cmd.flags['scopes'] = 'cloud-platform'
if self.vm_config.gpu_count:
cmd.flags['accelerator'] = (
gce_virtual_machine.GenerateAcceleratorSpecString(
self.vm_config.gpu_type, self.vm_config.gpu_count))
if self.vm_config.min_cpu_platform:
cmd.flags['min-cpu-platform'] = self.vm_config.min_cpu_platform
if self.vm_config.boot_disk_size:
cmd.flags['disk-size'] = self.vm_config.boot_disk_size
if self.vm_config.boot_disk_type:
cmd.flags['disk-type'] = self.vm_config.boot_disk_type
if self.vm_config.max_local_disks:
# TODO(pclay): Switch to local-ssd-volumes which support NVME when it
# leaves alpha. See
# https://cloud.google.com/sdk/gcloud/reference/alpha/container/clusters/create
cmd.flags['local-ssd-count'] = self.vm_config.max_local_disks
if self.min_nodes != self.num_nodes or self.max_nodes != self.num_nodes:
cmd.args.append('--enable-autoscaling')
cmd.flags['max-nodes'] = self.max_nodes
cmd.flags['min-nodes'] = self.min_nodes
cmd.flags['num-nodes'] = self.num_nodes
if self.vm_config.machine_type is None:
cmd.flags['machine-type'] = 'custom-{0}-{1}'.format(
self.vm_config.cpus, self.vm_config.memory_mib)
else:
cmd.flags['machine-type'] = self.vm_config.machine_type
cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
cmd.flags['labels'] = util.MakeFormattedDefaultTags()
# This command needs a long timeout due to the many minutes it
# can take to provision a large GPU-accelerated GKE cluster.
_, stderr, retcode = cmd.Issue(timeout=1200, raise_on_failure=False)
if retcode:
# Log specific type of failure, if known.
if 'ZONE_RESOURCE_POOL_EXHAUSTED' in stderr:
logging.exception('Container resources exhausted: %s', stderr)
raise errors.Benchmarks.InsufficientCapacityCloudFailure(
'Container resources exhausted in zone %s: %s' %
(self.zone, stderr))
util.CheckGcloudResponseKnownFailures(stderr, retcode)
raise errors.Resource.CreationError(stderr)
def _PostCreate(self):
"""Acquire cluster authentication."""
super(GkeCluster, self)._PostCreate()
cmd = util.GcloudCommand(
self, 'container', 'clusters', 'get-credentials', self.name)
env = os.environ.copy()
env['KUBECONFIG'] = FLAGS.kubeconfig
cmd.IssueRetryable(env=env)
if self.vm_config.gpu_count:
kubernetes_helper.CreateFromFile(NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT)
kubernetes_helper.CreateFromFile(
data.ResourcePath(NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET))
def _GetInstanceGroups(self):
cmd = util.GcloudCommand(self, 'container', 'node-pools', 'list')
cmd.flags['cluster'] = self.name
stdout, _, _ = cmd.Issue()
json_output = json.loads(stdout)
instance_groups = []
for node_pool in json_output:
for group_url in node_pool['instanceGroupUrls']:
instance_groups.append(group_url.split('/')[-1]) # last url part
return instance_groups
def _GetInstancesFromInstanceGroup(self, instance_group_name):
cmd = util.GcloudCommand(self, 'compute', 'instance-groups',
'list-instances', instance_group_name)
stdout, _, _ = cmd.Issue()
json_output = json.loads(stdout)
instances = []
for instance in json_output:
instances.append(instance['instance'].split('/')[-1])
return instances
def _IsDeleting(self):
cmd = util.GcloudCommand(
self, 'container', 'clusters', 'describe', self.name)
stdout, _, _ = cmd.Issue(raise_on_failure=False)
    return bool(stdout)
def _Delete(self):
"""Deletes the cluster."""
cmd = util.GcloudCommand(
self, 'container', 'clusters', 'delete', self.name)
cmd.args.append('--async')
cmd.Issue(raise_on_failure=False)
def _Exists(self):
"""Returns True if the cluster exits."""
cmd = util.GcloudCommand(
self, 'container', 'clusters', 'describe', self.name)
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
return retcode == 0
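# Illustrative sketch (not part of PerfKitBenchmarker): how the instance group
# URLs returned by `gcloud container node-pools list` reduce to bare group
# names in _GetInstanceGroups above. The URL below is a made-up example of the
# usual Compute Engine resource-URL shape.
def _ExampleInstanceGroupUrlParsing():
  url = ('https://www.googleapis.com/compute/v1/projects/my-project/zones/'
         'us-central1-a/instanceGroupManagers/gke-example-default-pool-grp')
  return url.split('/')[-1]  # -> 'gke-example-default-pool-grp'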
|
import logging
from apcaccess.status import ALL_UNITS
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_RESOURCES,
ELECTRICAL_CURRENT_AMPERE,
ELECTRICAL_VOLT_AMPERE,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_MINUTES,
TIME_SECONDS,
VOLT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
SENSOR_PREFIX = "UPS "
SENSOR_TYPES = {
"alarmdel": ["Alarm Delay", "", "mdi:alarm"],
"ambtemp": ["Ambient Temperature", "", "mdi:thermometer"],
"apc": ["Status Data", "", "mdi:information-outline"],
"apcmodel": ["Model", "", "mdi:information-outline"],
"badbatts": ["Bad Batteries", "", "mdi:information-outline"],
"battdate": ["Battery Replaced", "", "mdi:calendar-clock"],
"battstat": ["Battery Status", "", "mdi:information-outline"],
"battv": ["Battery Voltage", VOLT, "mdi:flash"],
"bcharge": ["Battery", PERCENTAGE, "mdi:battery"],
"cable": ["Cable Type", "", "mdi:ethernet-cable"],
"cumonbatt": ["Total Time on Battery", "", "mdi:timer-outline"],
"date": ["Status Date", "", "mdi:calendar-clock"],
"dipsw": ["Dip Switch Settings", "", "mdi:information-outline"],
"dlowbatt": ["Low Battery Signal", "", "mdi:clock-alert"],
"driver": ["Driver", "", "mdi:information-outline"],
"dshutd": ["Shutdown Delay", "", "mdi:timer-outline"],
"dwake": ["Wake Delay", "", "mdi:timer-outline"],
"endapc": ["Date and Time", "", "mdi:calendar-clock"],
"extbatts": ["External Batteries", "", "mdi:information-outline"],
"firmware": ["Firmware Version", "", "mdi:information-outline"],
"hitrans": ["Transfer High", VOLT, "mdi:flash"],
"hostname": ["Hostname", "", "mdi:information-outline"],
"humidity": ["Ambient Humidity", PERCENTAGE, "mdi:water-percent"],
"itemp": ["Internal Temperature", TEMP_CELSIUS, "mdi:thermometer"],
"lastxfer": ["Last Transfer", "", "mdi:transfer"],
"linefail": ["Input Voltage Status", "", "mdi:information-outline"],
"linefreq": ["Line Frequency", FREQUENCY_HERTZ, "mdi:information-outline"],
"linev": ["Input Voltage", VOLT, "mdi:flash"],
"loadpct": ["Load", PERCENTAGE, "mdi:gauge"],
"loadapnt": ["Load Apparent Power", PERCENTAGE, "mdi:gauge"],
"lotrans": ["Transfer Low", VOLT, "mdi:flash"],
"mandate": ["Manufacture Date", "", "mdi:calendar"],
"masterupd": ["Master Update", "", "mdi:information-outline"],
"maxlinev": ["Input Voltage High", VOLT, "mdi:flash"],
"maxtime": ["Battery Timeout", "", "mdi:timer-off-outline"],
"mbattchg": ["Battery Shutdown", PERCENTAGE, "mdi:battery-alert"],
"minlinev": ["Input Voltage Low", VOLT, "mdi:flash"],
"mintimel": ["Shutdown Time", "", "mdi:timer-outline"],
"model": ["Model", "", "mdi:information-outline"],
"nombattv": ["Battery Nominal Voltage", VOLT, "mdi:flash"],
"nominv": ["Nominal Input Voltage", VOLT, "mdi:flash"],
"nomoutv": ["Nominal Output Voltage", VOLT, "mdi:flash"],
"nompower": ["Nominal Output Power", POWER_WATT, "mdi:flash"],
"nomapnt": ["Nominal Apparent Power", ELECTRICAL_VOLT_AMPERE, "mdi:flash"],
"numxfers": ["Transfer Count", "", "mdi:counter"],
"outcurnt": ["Output Current", ELECTRICAL_CURRENT_AMPERE, "mdi:flash"],
"outputv": ["Output Voltage", VOLT, "mdi:flash"],
"reg1": ["Register 1 Fault", "", "mdi:information-outline"],
"reg2": ["Register 2 Fault", "", "mdi:information-outline"],
"reg3": ["Register 3 Fault", "", "mdi:information-outline"],
"retpct": ["Restore Requirement", PERCENTAGE, "mdi:battery-alert"],
"selftest": ["Last Self Test", "", "mdi:calendar-clock"],
"sense": ["Sensitivity", "", "mdi:information-outline"],
"serialno": ["Serial Number", "", "mdi:information-outline"],
"starttime": ["Startup Time", "", "mdi:calendar-clock"],
"statflag": ["Status Flag", "", "mdi:information-outline"],
"status": ["Status", "", "mdi:information-outline"],
"stesti": ["Self Test Interval", "", "mdi:information-outline"],
"timeleft": ["Time Left", "", "mdi:clock-alert"],
"tonbatt": ["Time on Battery", "", "mdi:timer-outline"],
"upsmode": ["Mode", "", "mdi:information-outline"],
"upsname": ["Name", "", "mdi:information-outline"],
"version": ["Daemon Info", "", "mdi:information-outline"],
"xoffbat": ["Transfer from Battery", "", "mdi:transfer"],
"xoffbatt": ["Transfer from Battery", "", "mdi:transfer"],
"xonbatt": ["Transfer to Battery", "", "mdi:transfer"],
}
SPECIFIC_UNITS = {"ITEMP": TEMP_CELSIUS}
INFERRED_UNITS = {
" Minutes": TIME_MINUTES,
" Seconds": TIME_SECONDS,
" Percent": PERCENTAGE,
" Volts": VOLT,
" Ampere": ELECTRICAL_CURRENT_AMPERE,
" Volt-Ampere": ELECTRICAL_VOLT_AMPERE,
" Watts": POWER_WATT,
" Hz": FREQUENCY_HERTZ,
" C": TEMP_CELSIUS,
" Percent Load Capacity": PERCENTAGE,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCES, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the APCUPSd sensors."""
apcups_data = hass.data[DOMAIN]
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in SENSOR_TYPES:
SENSOR_TYPES[sensor_type] = [
sensor_type.title(),
"",
"mdi:information-outline",
]
if sensor_type.upper() not in apcups_data.status:
_LOGGER.warning(
"Sensor type: %s does not appear in the APCUPSd status output",
sensor_type,
)
entities.append(APCUPSdSensor(apcups_data, sensor_type))
add_entities(entities, True)
def infer_unit(value):
"""If the value ends with any of the units from ALL_UNITS.
Split the unit off the end of the value and return the value, unit tuple
pair. Else return the original value and None as the unit.
"""
for unit in ALL_UNITS:
if value.endswith(unit):
return value[: -len(unit)], INFERRED_UNITS.get(unit, unit.strip())
return value, None
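# Illustrative sketch (not part of the integration), assuming ' Volts' is one
# of apcaccess' ALL_UNITS: a raw status value such as '13.2 Volts' splits into
# ('13.2', VOLT), while a value without a recognised unit (e.g. 'ONLINE') is
# returned unchanged with None as the unit.
def _example_infer_unit():
    return infer_unit("13.2 Volts"), infer_unit("ONLINE")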
class APCUPSdSensor(Entity):
"""Representation of a sensor entity for APCUPSd status values."""
def __init__(self, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = SENSOR_PREFIX + SENSOR_TYPES[sensor_type][0]
self._unit = SENSOR_TYPES[sensor_type][1]
self._inferred_unit = None
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return true if the UPS is online, else False."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if not self._unit:
return self._inferred_unit
return self._unit
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self.type.upper() not in self._data.status:
self._state = None
self._inferred_unit = None
else:
self._state, self._inferred_unit = infer_unit(
self._data.status[self.type.upper()]
)
|
import numpy as np
import os
import shutil
import tempfile
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_labels
from chainercv.datasets import CityscapesSemanticSegmentationDataset
from chainercv.datasets import CityscapesTestImageDataset
from chainercv.utils import assert_is_semantic_segmentation_dataset
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
from chainercv.utils import write_image
@testing.parameterize(
{'split': 'train', 'n_class': 19, 'label_mode': 'fine',
'ignore_labels': True},
{'split': 'val', 'n_class': 34, 'label_mode': 'coarse',
'ignore_labels': False}
)
class TestCityscapesSemanticSegmentationDataset(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
img_dir = os.path.join(
self.temp_dir, 'leftImg8bit/{}/aachen'.format(self.split))
resol = 'gtFine' if self.label_mode == 'fine' else 'gtCoarse'
label_dir = os.path.join(
self.temp_dir, '{}/{}/aachen'.format(resol, self.split))
os.makedirs(img_dir)
os.makedirs(label_dir)
for i in range(10):
img = np.random.randint(
0, 255, size=(3, 128, 160)).astype(np.uint8)
write_image(img, os.path.join(
img_dir, 'aachen_000000_0000{:02d}_leftImg8bit.png'.format(i)))
label = np.random.randint(
0, 34, size=(1, 128, 160)).astype(np.int32)
write_image(label, os.path.join(
label_dir,
'aachen_000000_0000{:02d}_{}_labelIds.png'.format(i, resol)))
self.dataset = CityscapesSemanticSegmentationDataset(
self.temp_dir, self.label_mode, self.split, self.ignore_labels)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_ignore_labels(self):
for _, label_orig in self.dataset:
H, W = label_orig.shape
label_out = np.ones((H, W), dtype=np.int32) * -1
for label in cityscapes_labels:
label_out[label_orig == label.trainId] = label.id
@attr.slow
def test_cityscapes_semantic_segmentation_dataset(self):
assert_is_semantic_segmentation_dataset(
self.dataset, self.n_class, n_example=10)
class TestCityscapesTestImageDataset(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
img_dir = os.path.join(
self.temp_dir, 'leftImg8bit/test/berlin')
os.makedirs(img_dir)
for i in range(10):
img = np.random.randint(
0, 255, size=(3, 128, 160)).astype(np.uint8)
write_image(img, os.path.join(
img_dir, 'berlin_000000_0000{:02d}_leftImg8bit.png'.format(i)))
self.dataset = CityscapesTestImageDataset(self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
@attr.slow
def test_cityscapes_dataset(self):
indices = np.random.permutation(np.arange(len(self.dataset)))
for i in indices[:10]:
img = self.dataset[i]
assert_is_image(img, color=True)
testing.run_module(__name__, __file__)
|
from copy import deepcopy
import numpy as np
from scipy import linalg
from .constants import FIFF
from .meas_info import _check_ch_keys
from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
from .proj import setup_proj
from .pick import pick_types, pick_channels, pick_channels_forward
from .base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..utils import (logger, warn, verbose, _validate_type, _check_preload,
_check_option)
from ..defaults import DEFAULTS
def _copy_channel(inst, ch_name, new_ch_name):
"""Add a copy of a channel specified by ch_name.
Input data can be in the form of Raw, Epochs or Evoked.
The instance object is modified inplace.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the EEG channels
ch_name : str
Name of the channel to copy.
new_ch_name : str
Name given to the copy of the channel.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The data with a copy of a given channel.
"""
new_inst = inst.copy().pick_channels([ch_name])
new_inst.rename_channels({ch_name: new_ch_name})
inst.add_channels([new_inst], force_update_info=True)
return inst
def _apply_reference(inst, ref_from, ref_to=None, forward=None,
ch_type='auto'):
"""Apply a custom EEG referencing scheme."""
# Check to see that data is preloaded
_check_preload(inst, "Applying a reference")
ch_type = _get_ch_type(inst, ch_type)
ch_dict = {ch_type: True, 'meg': False, 'ref_meg': False}
eeg_idx = pick_types(inst.info, **ch_dict)
if ref_to is None:
ref_to = [inst.ch_names[i] for i in eeg_idx]
extra = 'EEG channels found'
else:
extra = 'channels supplied'
if len(ref_to) == 0:
raise ValueError('No %s to apply the reference to' % (extra,))
# After referencing, existing SSPs might not be valid anymore.
projs_to_remove = []
for i, proj in enumerate(inst.info['projs']):
# Remove any average reference projections
if proj['desc'] == 'Average EEG reference' or \
proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF:
logger.info('Removing existing average EEG reference '
'projection.')
# Don't remove the projection right away, but do this at the end of
# this loop.
projs_to_remove.append(i)
# Inactive SSPs may block re-referencing
elif (not proj['active'] and
len([ch for ch in (ref_from + ref_to)
if ch in proj['data']['col_names']]) > 0):
raise RuntimeError(
'Inactive signal space projection (SSP) operators are '
'present that operate on sensors involved in the desired '
'referencing scheme. These projectors need to be applied '
            'using the apply_proj() method before the desired '
'reference can be set.'
)
    # Delete in reverse order so that removing an earlier projection does not
    # shift the indices of the ones still to be removed.
    for i in sorted(projs_to_remove, reverse=True):
        del inst.info['projs'][i]
# Need to call setup_proj after changing the projs:
inst._projector, _ = \
setup_proj(inst.info, add_eeg_ref=False, activate=False)
# Compute reference
if len(ref_from) > 0:
# this is guaranteed below, but we should avoid the crazy pick_channels
# behavior that [] gives all. Also use ordered=True just to make sure
# that all supplied channels actually exist.
assert len(ref_to) > 0
ref_names = ref_from
ref_from = pick_channels(inst.ch_names, ref_from, ordered=True)
ref_to = pick_channels(inst.ch_names, ref_to, ordered=True)
data = inst._data
ref_data = data[..., ref_from, :].mean(-2, keepdims=True)
data[..., ref_to, :] -= ref_data
ref_data = ref_data[..., 0, :]
# If the reference touches EEG/ECoG/sEEG electrodes, note in the info
# that a non-CAR has been applied.
if len(np.intersect1d(ref_to, eeg_idx)) > 0:
inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON
# REST
if forward is not None:
# use ch_sel and the given forward
forward = pick_channels_forward(forward, ref_names, ordered=True)
# 1-3. Compute a forward (G) and avg-ref'ed data (done above)
G = forward['sol']['data']
assert G.shape[0] == len(ref_names)
# 4. Compute the forward (G) and average-reference it (Ga):
Ga = G - np.mean(G, axis=0, keepdims=True)
# 5. Compute the Ga_inv by SVD
Ga_inv = linalg.pinv(Ga, rcond=1e-6)
# 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv
Ra = G @ Ga_inv
# 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp)
Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True)
data[..., ref_to, :] += Vpa
else:
ref_data = None
return inst, ref_data
def add_reference_channels(inst, ref_channels, copy=True):
"""Add reference channels to data that consists of all zeros.
Adds reference channels to data that were not included during recording.
This is useful when you need to re-reference your data to different
channels. These added channels will consist of all zeros.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Instance of Raw or Epochs with EEG channels and reference channel(s).
ref_channels : str | list of str
Name of the electrode(s) which served as the reference in the
recording. If a name is provided, a corresponding channel is added
and its data is set to 0. This is useful for later re-referencing.
copy : bool
Specifies whether the data will be copied (True) or modified in-place
(False). Defaults to True.
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with added EEG reference channels.
"""
# Check to see that data is preloaded
_check_preload(inst, 'add_reference_channels')
_validate_type(ref_channels, (list, tuple, str), 'ref_channels')
if isinstance(ref_channels, str):
ref_channels = [ref_channels]
for ch in ref_channels:
if ch in inst.info['ch_names']:
raise ValueError("Channel %s already specified in inst." % ch)
# Once CAR is applied (active), don't allow adding channels
if _has_eeg_average_ref_proj(inst.info['projs'], check_active=True):
raise RuntimeError('Average reference already applied to data.')
if copy:
inst = inst.copy()
if isinstance(inst, (BaseRaw, Evoked)):
data = inst._data
refs = np.zeros((len(ref_channels), data.shape[1]))
data = np.vstack((data, refs))
inst._data = data
elif isinstance(inst, BaseEpochs):
data = inst._data
x, y, z = data.shape
refs = np.zeros((x * len(ref_channels), z))
data = np.vstack((data.reshape((x * y, z), order='F'), refs))
data = data.reshape(x, y + len(ref_channels), z, order='F')
inst._data = data
else:
raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
% type(inst))
nchan = len(inst.info['ch_names'])
# only do this if we actually have digitisation points
if inst.info.get('dig', None) is not None:
# "zeroth" EEG electrode dig points is reference
ref_dig_loc = [dl for dl in inst.info['dig'] if (
dl['kind'] == FIFF.FIFFV_POINT_EEG and
dl['ident'] == 0)]
if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels):
ref_dig_array = np.zeros(12)
warn('The locations of multiple reference channels are ignored '
'(set to zero).')
else: # n_ref_channels == 1 and a single ref digitization exists
ref_dig_array = np.concatenate((ref_dig_loc[0]['r'],
ref_dig_loc[0]['r'], np.zeros(6)))
# Replace the (possibly new) Ref location for each channel
for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]):
inst.info['chs'][idx]['loc'][3:6] = ref_dig_loc[0]['r']
else:
# we should actually be able to do this from the montage, but
# it looks like the montage isn't stored, so we can't extract
        # this information. The user will just have to call set_montage().
        # By setting this to zero, we fall back to the old behavior
        # when digitisation is missing.
ref_dig_array = np.zeros(12)
for ch in ref_channels:
chan_info = {'ch_name': ch,
'coil_type': FIFF.FIFFV_COIL_EEG,
'kind': FIFF.FIFFV_EEG_CH,
'logno': nchan + 1,
'scanno': nchan + 1,
'cal': 1,
'range': 1.,
'unit_mul': 0.,
'unit': FIFF.FIFF_UNIT_V,
'coord_frame': FIFF.FIFFV_COORD_HEAD,
'loc': ref_dig_array}
inst.info['chs'].append(chan_info)
inst.info._update_redundant()
if isinstance(inst, BaseRaw):
inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
range_ = np.arange(1, len(ref_channels) + 1)
for pi, picks in enumerate(inst._read_picks):
inst._read_picks[pi] = np.concatenate(
[picks, np.max(picks) + range_])
inst.info._check_consistency()
set_eeg_reference(inst, ref_channels=ref_channels, copy=False,
verbose=False)
return inst
_ref_dict = {
FIFF.FIFFV_MNE_CUSTOM_REF_ON: 'on',
FIFF.FIFFV_MNE_CUSTOM_REF_OFF: 'off',
FIFF.FIFFV_MNE_CUSTOM_REF_CSD: 'CSD',
}
def _check_can_reref(inst):
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance")
current_custom = inst.info['custom_ref_applied']
if current_custom not in (FIFF.FIFFV_MNE_CUSTOM_REF_ON,
FIFF.FIFFV_MNE_CUSTOM_REF_OFF):
raise RuntimeError('Cannot set new reference on data with custom '
'reference type %r' % (_ref_dict[current_custom],))
@verbose
def set_eeg_reference(inst, ref_channels='average', copy=True,
projection=False, ch_type='auto', forward=None,
verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Instance of Raw or Epochs with EEG channels and reference channel(s).
%(set_eeg_reference_ref_channels)s
copy : bool
Specifies whether the data will be copied (True) or modified in-place
(False). Defaults to True.
%(set_eeg_reference_projection)s
%(set_eeg_reference_ch_type)s
%(set_eeg_reference_forward)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'`` and
``projection=True`` a projection will be added instead of directly
re-referencing the data.
ref_data : array
Array of reference data subtracted from EEG channels. This will be
``None`` if ``projection=True`` or ``ref_channels='REST'``.
%(set_eeg_reference_see_also_notes)s
"""
from ..forward import Forward
_check_can_reref(inst)
if projection: # average reference projector
if ref_channels != 'average':
raise ValueError('Setting projection=True is only supported for '
'ref_channels="average", got %r.'
% (ref_channels,))
if _has_eeg_average_ref_proj(inst.info['projs']):
warn('An average reference projection was already added. The data '
'has been left untouched.')
else:
# Creating an average reference may fail. In this case, make
# sure that the custom_ref_applied flag is left untouched.
custom_ref_applied = inst.info['custom_ref_applied']
try:
inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF
inst.add_proj(make_eeg_average_ref_proj(inst.info,
activate=False))
except Exception:
inst.info['custom_ref_applied'] = custom_ref_applied
raise
# If the data has been preloaded, projections will no
# longer be automatically applied.
if inst.preload:
logger.info('Average reference projection was added, '
'but has not been applied yet. Use the '
'apply_proj method to apply it.')
return inst, None
del projection # not used anymore
inst = inst.copy() if copy else inst
ch_type = _get_ch_type(inst, ch_type)
ch_dict = {ch_type: True, 'meg': False, 'ref_meg': False}
eeg_idx = pick_types(inst.info, **ch_dict)
ch_sel = [inst.ch_names[i] for i in eeg_idx]
if ref_channels == 'REST':
_validate_type(forward, Forward, 'forward when ref_channels="REST"')
else:
forward = None # signal to _apply_reference not to do REST
if ref_channels in ('average', 'REST'):
logger.info(f'Applying {ref_channels} reference.')
ref_channels = ch_sel
if ref_channels == []:
logger.info('EEG data marked as already having the desired reference.')
else:
logger.info('Applying a custom %s '
'reference.' % DEFAULTS['titles'][ch_type])
return _apply_reference(inst, ref_channels, ch_sel, forward,
ch_type=ch_type)
def _get_ch_type(inst, ch_type):
_validate_type(ch_type, str, 'ch_type')
_check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg'))
# if ch_type is 'auto', search through list to find first reasonable
# reference-able channel type.
if ch_type == 'auto':
for type_ in ['eeg', 'ecog', 'seeg']:
if type_ in inst:
ch_type = type_
logger.info('%s channel type selected for '
're-referencing' % DEFAULTS['titles'][type_])
break
# if auto comes up empty, or the user specifies a bad ch_type.
else:
raise ValueError('No EEG, ECoG or sEEG channels found '
'to rereference.')
return ch_type
@verbose
def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
drop_refs=True, copy=True, verbose=None):
"""Re-reference selected channels using a bipolar referencing scheme.
A bipolar reference takes the difference between two channels (the anode
minus the cathode) and adds it as a new virtual channel. The original
channels will be dropped.
Multiple anodes and cathodes can be specified, in which case multiple
    virtual channels will be created. The 1st cathode will be subtracted from
    the 1st anode, the 2nd cathode from the 2nd anode, etc.
By default, the virtual channels will be annotated with channel info of
the anodes, their locations set to (0, 0, 0) and coil types set to
EEG_BIPOLAR.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the unreferenced channels.
anode : str | list of str
The name(s) of the channel(s) to use as anode in the bipolar reference.
cathode : str | list of str
The name(s) of the channel(s) to use as cathode in the bipolar
reference.
ch_name : str | list of str | None
The channel name(s) for the virtual channel(s) containing the resulting
signal. By default, bipolar channels are named after the anode and
cathode, but it is recommended to supply a more meaningful name.
ch_info : dict | list of dict | None
This parameter can be used to supply a dictionary (or a dictionary for
each bipolar channel) containing channel information to merge in,
overwriting the default values. Defaults to None.
drop_refs : bool
Whether to drop the anode/cathode channels from the instance.
copy : bool
Whether to operate on a copy of the data (True) or modify it in-place
(False). Defaults to True.
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with the specified channels re-referenced.
See Also
--------
set_eeg_reference : Convenience function for creating an EEG reference.
Notes
-----
1. If the anodes contain any EEG channels, this function removes
any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. The data must be preloaded.
.. versionadded:: 0.9.0
"""
_check_can_reref(inst)
if not isinstance(anode, list):
anode = [anode]
if not isinstance(cathode, list):
cathode = [cathode]
if len(anode) != len(cathode):
raise ValueError('Number of anodes (got %d) must equal the number '
'of cathodes (got %d).' % (len(anode), len(cathode)))
if ch_name is None:
ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)]
elif not isinstance(ch_name, list):
ch_name = [ch_name]
if len(ch_name) != len(anode):
raise ValueError('Number of channel names must equal the number of '
'anodes/cathodes (got %d).' % len(ch_name))
# Check for duplicate channel names (it is allowed to give the name of the
# anode or cathode channel, as they will be replaced).
for ch, a, c in zip(ch_name, anode, cathode):
if ch not in [a, c] and ch in inst.ch_names:
raise ValueError('There is already a channel named "%s", please '
'specify a different name for the bipolar '
'channel using the ch_name parameter.' % ch)
if ch_info is None:
ch_info = [{} for _ in anode]
elif not isinstance(ch_info, list):
ch_info = [ch_info]
if len(ch_info) != len(anode):
raise ValueError('Number of channel info dictionaries must equal the '
'number of anodes/cathodes.')
# Merge specified and anode channel information dictionaries
new_chs = []
for ci, (an, ch) in enumerate(zip(anode, ch_info)):
_check_ch_keys(ch, ci, name='ch_info', check_min=False)
an_idx = inst.ch_names.index(an)
this_chs = deepcopy(inst.info['chs'][an_idx])
# Set channel location and coil type
this_chs['loc'] = np.zeros(12)
this_chs['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
this_chs.update(ch)
new_chs.append(this_chs)
if copy:
inst = inst.copy()
for i, (an, ca, name, chs) in enumerate(
zip(anode, cathode, ch_name, new_chs)):
if an in anode[i + 1:] or an in cathode[i + 1:] or not drop_refs:
# Make a copy of the channel if it's still needed later
# otherwise it's modified inplace
_copy_channel(inst, an, 'TMP')
an = 'TMP'
_apply_reference(inst, [ca], [an]) # ensures preloaded
an_idx = inst.ch_names.index(an)
inst.info['chs'][an_idx] = chs
inst.info['chs'][an_idx]['ch_name'] = name
logger.info('Bipolar channel added as "%s".' % name)
inst.info._update_redundant()
# Drop remaining channels.
if drop_refs:
drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
inst.drop_channels(drop_channels)
return inst
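# Illustrative sketch (not an official MNE example): build a tiny preloaded Raw
# object and apply the bipolar reference defined above. The channel names and
# sampling rate are arbitrary; ``mne.create_info`` and ``mne.io.RawArray`` are
# assumed to be available from an installed MNE.
def _example_bipolar_reference():
    import mne
    data = np.random.randn(3, 1000) * 1e-6  # 3 EEG channels, 1000 samples
    info = mne.create_info(['EEG 001', 'EEG 002', 'EEG 003'], 1000., 'eeg')
    raw = mne.io.RawArray(data, info)
    # 'EEG 001-002' holds EEG 001 minus EEG 002; the originals are dropped.
    return set_bipolar_reference(raw, anode='EEG 001', cathode='EEG 002',
                                 ch_name='EEG 001-002')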
|
from __future__ import annotations
import asyncio
import collections.abc
import json
import logging
import os
import re
import shutil
import tarfile
from datetime import datetime
from pathlib import Path
from typing import (
AsyncIterator,
Awaitable,
Callable,
Iterator,
List,
Optional,
Union,
TYPE_CHECKING,
Tuple,
)
import aiohttp
import discord
import pkg_resources
from fuzzywuzzy import fuzz, process
from redbot import VersionInfo
from redbot.core import data_manager
from redbot.core.utils.chat_formatting import box
if TYPE_CHECKING:
from redbot.core.bot import Red
from redbot.core.commands import Command, Context
main_log = logging.getLogger("red")
__all__ = (
"safe_delete",
"fuzzy_command_search",
"format_fuzzy_results",
"create_backup",
"send_to_owners_with_preprocessor",
"send_to_owners_with_prefix_replaced",
"expected_version",
"fetch_latest_red_version_info",
)
def safe_delete(pth: Path):
if pth.exists():
for root, dirs, files in os.walk(str(pth)):
os.chmod(root, 0o700)
for d in dirs:
os.chmod(os.path.join(root, d), 0o700)
for f in files:
os.chmod(os.path.join(root, f), 0o700)
shutil.rmtree(str(pth), ignore_errors=True)
def _fuzzy_log_filter(record):
return record.funcName != "extractWithoutOrder"
logging.getLogger().addFilter(_fuzzy_log_filter)
async def fuzzy_command_search(
ctx: Context,
term: Optional[str] = None,
*,
commands: Optional[Union[AsyncIterator[Command], Iterator[Command]]] = None,
min_score: int = 80,
) -> Optional[List[Command]]:
"""Search for commands which are similar in name to the one invoked.
    Returns at most 5 commands, each of which must match the term with a
    score of at least ``min_score``.
Parameters
----------
ctx : `commands.Context <redbot.core.commands.Context>`
The command invocation context.
term : Optional[str]
The name of the invoked command. If ``None``,
`Context.invoked_with` will be used instead.
commands : Optional[Union[AsyncIterator[commands.Command], Iterator[commands.Command]]]
The commands available to choose from when doing a fuzzy match.
When omitted, `Bot.walk_commands` will be used instead.
min_score : int
The minimum score for matched commands to reach. Defaults to 80.
Returns
-------
Optional[List[`commands.Command <redbot.core.commands.Command>`]]
A list of commands which were fuzzily matched with the invoked
command.
"""
if ctx.guild is not None:
enabled = await ctx.bot._config.guild(ctx.guild).fuzzy()
else:
enabled = await ctx.bot._config.fuzzy()
if not enabled:
return None
if term is None:
term = ctx.invoked_with
# If the term is an alias or CC, we don't want to send a supplementary fuzzy search.
alias_cog = ctx.bot.get_cog("Alias")
if alias_cog is not None:
alias = await alias_cog._aliases.get_alias(ctx.guild, term)
if alias:
return None
customcom_cog = ctx.bot.get_cog("CustomCommands")
if customcom_cog is not None:
cmd_obj = customcom_cog.commandobj
try:
await cmd_obj.get(ctx.message, term)
        except Exception:
pass
else:
return None
if commands is None:
choices = set(ctx.bot.walk_commands())
elif isinstance(commands, collections.abc.AsyncIterator):
choices = {c async for c in commands}
else:
choices = set(commands)
# Do the scoring. `extracted` is a list of tuples in the form `(command, score)`
extracted = process.extract(term, choices, limit=5, scorer=fuzz.QRatio)
if not extracted:
return None
# Filter through the fuzzy-matched commands.
matched_commands = []
for command, score in extracted:
if score < min_score:
# Since the list is in decreasing order of score, we can exit early.
break
if await command.can_see(ctx):
matched_commands.append(command)
return matched_commands
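# Illustrative sketch (not Red's public API): the scoring step of the fuzzy
# search in isolation. ``process.extract`` returns (choice, score) pairs in
# decreasing score order, which is why the loop above can break at the first
# score below ``min_score``. The command names here are made up.
def _example_fuzzy_scoring():
    choices = {"ping", "playlist", "plan", "purge"}
    return process.extract("playlst", choices, limit=5, scorer=fuzz.QRatio)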
async def format_fuzzy_results(
ctx: Context, matched_commands: List[Command], *, embed: Optional[bool] = None
) -> Union[str, discord.Embed]:
"""Format the result of a fuzzy command search.
Parameters
----------
ctx : `commands.Context <redbot.core.commands.Context>`
The context in which this result is being displayed.
matched_commands : List[`commands.Command <redbot.core.commands.Command>`]
A list of commands which have been matched by the fuzzy search, sorted
in order of decreasing similarity.
embed : bool
Whether or not the result should be an embed. If set to ``None``, this
will default to the result of `ctx.embed_requested`.
Returns
-------
Union[str, discord.Embed]
The formatted results.
"""
if embed is not False and (embed is True or await ctx.embed_requested()):
lines = []
for cmd in matched_commands:
short_doc = cmd.format_shortdoc_for_context(ctx)
lines.append(f"**{ctx.clean_prefix}{cmd.qualified_name}** {short_doc}")
return discord.Embed(
title="Perhaps you wanted one of these?",
colour=await ctx.embed_colour(),
description="\n".join(lines),
)
else:
lines = []
for cmd in matched_commands:
short_doc = cmd.format_shortdoc_for_context(ctx)
lines.append(f"{ctx.clean_prefix}{cmd.qualified_name} -- {short_doc}")
return "Perhaps you wanted one of these? " + box("\n".join(lines), lang="vhdl")
async def create_backup(dest: Path = Path.home()) -> Optional[Path]:
data_path = Path(data_manager.core_data_path().parent)
if not data_path.exists():
return None
dest.mkdir(parents=True, exist_ok=True)
timestr = datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S")
backup_fpath = dest / f"redv3_{data_manager.instance_name}_{timestr}.tar.gz"
to_backup = []
exclusions = [
"__pycache__",
"Lavalink.jar",
os.path.join("Downloader", "lib"),
os.path.join("CogManager", "cogs"),
os.path.join("RepoManager", "repos"),
os.path.join("Audio", "logs"),
]
# Avoiding circular imports
from ...cogs.downloader.repo_manager import RepoManager
repo_mgr = RepoManager()
await repo_mgr.initialize()
repo_output = []
for repo in repo_mgr.repos:
repo_output.append({"url": repo.url, "name": repo.name, "branch": repo.branch})
repos_file = data_path / "cogs" / "RepoManager" / "repos.json"
with repos_file.open("w") as fs:
json.dump(repo_output, fs, indent=4)
instance_file = data_path / "instance.json"
with instance_file.open("w") as fs:
json.dump({data_manager.instance_name: data_manager.basic_config}, fs, indent=4)
for f in data_path.glob("**/*"):
if not any(ex in str(f) for ex in exclusions) and f.is_file():
to_backup.append(f)
with tarfile.open(str(backup_fpath), "w:gz") as tar:
for f in to_backup:
tar.add(str(f), arcname=str(f.relative_to(data_path)), recursive=False)
return backup_fpath
# this might be worth moving to `bot.send_to_owners` at a later date
async def send_to_owners_with_preprocessor(
bot: Red,
content: str,
*,
content_preprocessor: Optional[
Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]
] = None,
**kwargs,
):
"""
This sends something to all owners and their configured extra destinations.
This acts the same as `Red.send_to_owners`, with
one added keyword argument as detailed below in *Other Parameters*.
Other Parameters
----------------
content_preprocessor: Optional[Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]]
Optional async function that takes
bot object, owner notification destination and message content
and returns the content that should be sent to given location.
"""
destinations = await bot.get_owner_notification_destinations()
async def wrapped_send(bot, location, content=None, preprocessor=None, **kwargs):
try:
if preprocessor is not None:
content = await preprocessor(bot, location, content)
await location.send(content, **kwargs)
except Exception as _exc:
main_log.error(
"I could not send an owner notification to %s (%s)",
location,
location.id,
exc_info=_exc,
)
sends = [wrapped_send(bot, d, content, content_preprocessor, **kwargs) for d in destinations]
await asyncio.gather(*sends)
async def send_to_owners_with_prefix_replaced(bot: Red, content: str, **kwargs):
"""
This sends something to all owners and their configured extra destinations.
This acts the same as `Red.send_to_owners`, with one addition - `[p]` in ``content`` argument
is replaced with a clean prefix for each specific destination.
"""
async def preprocessor(bot: Red, destination: discord.abc.Messageable, content: str) -> str:
prefixes = await bot.get_valid_prefixes(getattr(destination, "guild", None))
prefix = re.sub(
rf"<@!?{bot.user.id}>", f"@{bot.user.name}".replace("\\", r"\\"), prefixes[0]
)
return content.replace("[p]", prefix)
await send_to_owners_with_preprocessor(bot, content, content_preprocessor=preprocessor)
def expected_version(current: str, expected: str) -> bool:
# `pkg_resources` needs a regular requirement string, so "x" serves as requirement's name here
return current in pkg_resources.Requirement.parse(f"x{expected}")
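# Illustrative sketch (not part of Red): how expected_version() reads. The "x"
# prefix only gives pkg_resources a requirement name to parse; the call below
# returns True because version 3.4.0 satisfies ">=3.3.0,<3.5".
def _example_expected_version():
    return expected_version("3.4.0", ">=3.3.0,<3.5")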
async def fetch_latest_red_version_info() -> Tuple[Optional[VersionInfo], Optional[str]]:
try:
async with aiohttp.ClientSession() as session:
async with session.get("https://pypi.org/pypi/Red-DiscordBot/json") as r:
data = await r.json()
except (aiohttp.ClientError, asyncio.TimeoutError):
return None, None
else:
release = VersionInfo.from_str(data["info"]["version"])
required_python = data["info"]["requires_python"]
return release, required_python
|
import pytest
from homeassistant.components.cover import SERVICE_OPEN_COVER
from homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers import intent
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service
async def test_http_handle_intent(hass, hass_client, hass_admin_user):
"""Test handle intent via HTTP API."""
class TestIntentHandler(intent.IntentHandler):
"""Test Intent Handler."""
intent_type = "OrderBeer"
async def async_handle(self, intent):
"""Handle the intent."""
assert intent.context.user_id == hass_admin_user.id
response = intent.create_response()
response.async_set_speech(
"I've ordered a {}!".format(intent.slots["type"]["value"])
)
response.async_set_card(
"Beer ordered", "You chose a {}.".format(intent.slots["type"]["value"])
)
return response
intent.async_register(hass, TestIntentHandler())
result = await async_setup_component(hass, "intent", {})
assert result
client = await hass_client()
resp = await client.post(
"/api/intent/handle", json={"name": "OrderBeer", "data": {"type": "Belgian"}}
)
assert resp.status == 200
data = await resp.json()
assert data == {
"card": {
"simple": {"content": "You chose a Belgian.", "title": "Beer ordered"}
},
"speech": {"plain": {"extra_data": None, "speech": "I've ordered a Belgian!"}},
}
async def test_cover_intents_loading(hass):
"""Test Cover Intents Loading."""
assert await async_setup_component(hass, "intent", {})
with pytest.raises(intent.UnknownIntent):
await intent.async_handle(
hass, "test", "HassOpenCover", {"name": {"value": "garage door"}}
)
assert await async_setup_component(hass, "cover", {})
hass.states.async_set("cover.garage_door", "closed")
calls = async_mock_service(hass, "cover", SERVICE_OPEN_COVER)
response = await intent.async_handle(
hass, "test", "HassOpenCover", {"name": {"value": "garage door"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Opened garage door"
assert len(calls) == 1
call = calls[0]
assert call.domain == "cover"
assert call.service == "open_cover"
assert call.data == {"entity_id": "cover.garage_door"}
async def test_turn_on_intent(hass):
"""Test HassTurnOn intent."""
result = await async_setup_component(hass, "homeassistant", {})
result = await async_setup_component(hass, "intent", {})
assert result
hass.states.async_set("light.test_light", "off")
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
response = await intent.async_handle(
hass, "test", "HassTurnOn", {"name": {"value": "test light"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Turned test light on"
assert len(calls) == 1
call = calls[0]
assert call.domain == "light"
assert call.service == "turn_on"
assert call.data == {"entity_id": ["light.test_light"]}
async def test_turn_off_intent(hass):
"""Test HassTurnOff intent."""
result = await async_setup_component(hass, "homeassistant", {})
result = await async_setup_component(hass, "intent", {})
assert result
hass.states.async_set("light.test_light", "on")
calls = async_mock_service(hass, "light", SERVICE_TURN_OFF)
response = await intent.async_handle(
hass, "test", "HassTurnOff", {"name": {"value": "test light"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Turned test light off"
assert len(calls) == 1
call = calls[0]
assert call.domain == "light"
assert call.service == "turn_off"
assert call.data == {"entity_id": ["light.test_light"]}
async def test_toggle_intent(hass):
"""Test HassToggle intent."""
result = await async_setup_component(hass, "homeassistant", {})
result = await async_setup_component(hass, "intent", {})
assert result
hass.states.async_set("light.test_light", "off")
calls = async_mock_service(hass, "light", SERVICE_TOGGLE)
response = await intent.async_handle(
hass, "test", "HassToggle", {"name": {"value": "test light"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Toggled test light"
assert len(calls) == 1
call = calls[0]
assert call.domain == "light"
assert call.service == "toggle"
assert call.data == {"entity_id": ["light.test_light"]}
async def test_turn_on_multiple_intent(hass):
"""Test HassTurnOn intent with multiple similar entities.
This tests that matching finds the proper entity among similar names.
"""
result = await async_setup_component(hass, "homeassistant", {})
result = await async_setup_component(hass, "intent", {})
assert result
hass.states.async_set("light.test_light", "off")
hass.states.async_set("light.test_lights_2", "off")
hass.states.async_set("light.test_lighter", "off")
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
response = await intent.async_handle(
hass, "test", "HassTurnOn", {"name": {"value": "test lights"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Turned test lights 2 on"
assert len(calls) == 1
call = calls[0]
assert call.domain == "light"
assert call.service == "turn_on"
assert call.data == {"entity_id": ["light.test_lights_2"]}
|
import logging
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from wirelesstagpy import NotificationConfig as NC, WirelessTags, WirelessTagsException
from homeassistant import util
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_VOLTAGE,
CONF_PASSWORD,
CONF_USERNAME,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
VOLT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Strength of signal in dBm
ATTR_TAG_SIGNAL_STRENGTH = "signal_strength"
# Indicates if tag is out of range or not
ATTR_TAG_OUT_OF_RANGE = "out_of_range"
# Number in percent of the tag receiver's maximum power
ATTR_TAG_POWER_CONSUMPTION = "power_consumption"
NOTIFICATION_ID = "wirelesstag_notification"
NOTIFICATION_TITLE = "Wireless Sensor Tag Setup"
DOMAIN = "wirelesstag"
DEFAULT_ENTITY_NAMESPACE = "wirelesstag"
# Template for signal - first parameter is tag_id,
# second, tag manager mac address
SIGNAL_TAG_UPDATE = "wirelesstag.tag_info_updated_{}_{}"
# Template for signal - tag_id, sensor type and
# tag manager mac address
SIGNAL_BINARY_EVENT_UPDATE = "wirelesstag.binary_event_updated_{}_{}_{}"
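# Illustrative sketch (not part of the integration): how the dispatcher signal
# names above are built. For tag id 3 on a tag manager whose MAC is "0F0F0F"
# (made-up values), the update signal becomes
# "wirelesstag.tag_info_updated_3_0F0F0F" and a "motion" binary event uses
# "wirelesstag.binary_event_updated_3_motion_0F0F0F".
def _example_signal_names():
    return (
        SIGNAL_TAG_UPDATE.format(3, "0F0F0F"),
        SIGNAL_BINARY_EVENT_UPDATE.format(3, "motion", "0F0F0F"),
    )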
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
class WirelessTagPlatform:
"""Principal object to manage all registered in HA tags."""
def __init__(self, hass, api):
"""Designated initializer for wirelesstags platform."""
self.hass = hass
self.api = api
self.tags = {}
self._local_base_url = None
@property
def tag_manager_macs(self):
"""Return list of tag managers mac addresses in user account."""
return self.api.mac_addresses
def load_tags(self):
"""Load tags from remote server."""
self.tags = self.api.load_tags()
return self.tags
def arm(self, switch):
"""Arm entity sensor monitoring."""
func_name = f"arm_{switch.sensor_type}"
arm_func = getattr(self.api, func_name)
if arm_func is not None:
arm_func(switch.tag_id, switch.tag_manager_mac)
def disarm(self, switch):
"""Disarm entity sensor monitoring."""
func_name = f"disarm_{switch.sensor_type}"
disarm_func = getattr(self.api, func_name)
if disarm_func is not None:
disarm_func(switch.tag_id, switch.tag_manager_mac)
def make_notifications(self, binary_sensors, mac):
"""Create configurations for push notifications."""
_LOGGER.info("Creating configurations for push notifications")
configs = []
bi_url = self.binary_event_callback_url
for bi_sensor in binary_sensors:
configs.extend(bi_sensor.event.build_notifications(bi_url, mac))
update_url = self.update_callback_url
update_config = NC.make_config_for_update_event(update_url, mac)
configs.append(update_config)
return configs
def install_push_notifications(self, binary_sensors):
"""Register local push notification from tag manager."""
_LOGGER.info("Registering local push notifications")
for mac in self.tag_manager_macs:
configs = self.make_notifications(binary_sensors, mac)
# install notifications for all tags in tag manager
# specified by mac
result = self.api.install_push_notification(0, configs, True, mac)
if not result:
self.hass.components.persistent_notification.create(
"Error: failed to install local push notifications <br />",
title="Wireless Sensor Tag Setup Local Push Notifications",
notification_id="wirelesstag_failed_push_notification",
)
else:
_LOGGER.info(
"Installed push notifications for all tags in %s",
mac,
)
@property
def local_base_url(self):
"""Define base url of hass in local network."""
if self._local_base_url is None:
self._local_base_url = f"http://{util.get_local_ip()}"
port = self.hass.config.api.port
if port is not None:
self._local_base_url += f":{port}"
return self._local_base_url
@property
def update_callback_url(self):
"""Return url for local push notifications(update event)."""
return f"{self.local_base_url}/api/events/wirelesstag_update_tags"
@property
def binary_event_callback_url(self):
"""Return url for local push notifications(binary event)."""
return f"{self.local_base_url}/api/events/wirelesstag_binary_event"
def handle_update_tags_event(self, event):
"""Handle push event from wireless tag manager."""
_LOGGER.info("push notification for update arrived: %s", event)
try:
tag_id = event.data.get("id")
mac = event.data.get("mac")
dispatcher_send(self.hass, SIGNAL_TAG_UPDATE.format(tag_id, mac), event)
except Exception as ex: # pylint: disable=broad-except
            _LOGGER.error(
                "Unable to handle tag update event: %s error: %s",
                str(event),
                str(ex),
            )
def handle_binary_event(self, event):
"""Handle push notifications for binary (on/off) events."""
_LOGGER.info("Push notification for binary event arrived: %s", event)
try:
tag_id = event.data.get("id")
event_type = event.data.get("type")
mac = event.data.get("mac")
dispatcher_send(
self.hass,
SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac),
event,
)
except Exception as ex: # pylint: disable=broad-except
            _LOGGER.error(
                "Unable to handle tag binary event: %s error: %s",
                str(event),
                str(ex),
            )
def setup(hass, config):
"""Set up the Wireless Sensor Tag component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
try:
wirelesstags = WirelessTags(username=username, password=password)
platform = WirelessTagPlatform(hass, wirelesstags)
platform.load_tags()
hass.data[DOMAIN] = platform
except (ConnectTimeout, HTTPError, WirelessTagsException) as ex:
_LOGGER.error("Unable to connect to wirelesstag.net service: %s", str(ex))
hass.components.persistent_notification.create(
f"Error: {ex}<br />Please restart hass after fixing this.",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
# listen to custom events
hass.bus.listen(
"wirelesstag_update_tags", hass.data[DOMAIN].handle_update_tags_event
)
hass.bus.listen("wirelesstag_binary_event", hass.data[DOMAIN].handle_binary_event)
return True
class WirelessTagBaseSensor(Entity):
"""Base class for HA implementation for Wireless Sensor Tag."""
def __init__(self, api, tag):
"""Initialize a base sensor for Wireless Sensor Tag platform."""
self._api = api
self._tag = tag
self._uuid = self._tag.uuid
self.tag_id = self._tag.tag_id
self.tag_manager_mac = self._tag.tag_manager_mac
self._name = self._tag.name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def principal_value(self):
"""Return base value.
        Subclasses need to override this based on the type of sensor.
"""
return 0
def updated_state_value(self):
"""Return formatted value.
        The default implementation formats the principal value.
"""
return self.decorate_value(self.principal_value)
# pylint: disable=no-self-use
def decorate_value(self, value):
"""Decorate input value to be well presented for end user."""
return f"{value:.1f}"
@property
def available(self):
"""Return True if entity is available."""
return self._tag.is_alive
def update(self):
"""Update state."""
if not self.should_poll:
return
updated_tags = self._api.load_tags()
updated_tag = updated_tags[self._uuid]
if updated_tag is None:
_LOGGER.error('Unable to update tag: "%s"', self.name)
return
self._tag = updated_tag
self._state = self.updated_state_value()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_BATTERY_LEVEL: int(self._tag.battery_remaining * 100),
ATTR_VOLTAGE: f"{self._tag.battery_volts:.2f}{VOLT}",
ATTR_TAG_SIGNAL_STRENGTH: f"{self._tag.signal_strength}{SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
ATTR_TAG_OUT_OF_RANGE: not self._tag.is_in_range,
ATTR_TAG_POWER_CONSUMPTION: f"{self._tag.power_consumption:.2f}{PERCENTAGE}",
}
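# Hedged sketch: a concrete platform entity would subclass the base class and
# override principal_value (and decorate_value if needed). The class name and
# the tag's `temperature` attribute below are illustrative assumptions, not
# part of this component.
class IllustrativeTemperatureSensor(WirelessTagBaseSensor):
    """Example-only sensor built on WirelessTagBaseSensor."""
    @property
    def principal_value(self):
        """Return the raw temperature reported by the tag (assumed attribute)."""
        return self._tag.temperature
    @property
    def state(self):
        """Return the formatted value computed in update()."""
        return self._state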
|
import tensornetwork as tn
from tensornetwork.backend_contextmanager import _default_backend_stack
import pytest
import numpy as np
import tensorflow as tf
import torch
import jax
np_dtypes = [np.float32, np.float64, np.complex64, np.complex128, np.int32]
tf_dtypes = [tf.float32, tf.float64, tf.complex64, tf.complex128, tf.int32]
torch_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]
jax_dtypes = [
jax.numpy.float32, jax.numpy.float64, jax.numpy.complex64,
jax.numpy.complex128, jax.numpy.int32
]
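# Every test below takes a `backend` argument that is assumed to come from a
# pytest fixture defined in the project's conftest.py. A minimal sketch of such
# a fixture (the exact backend names are assumptions) could look like this:
@pytest.fixture(name="backend", params=["numpy", "tensorflow", "jax", "pytorch"])
def backend_fixture(request):
  # Run each test once per backend name listed in `params`.
  return request.param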
def test_network_copy_reordered(backend):
a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), backend=backend)
a[0] ^ b[1]
a[1] ^ c[2]
b[2] ^ c[0]
edge_order = [a[2], c[1], b[0]]
node_dict, edge_dict = tn.copy({a, b, c})
tn.check_correct({a, b, c})
res = a @ b @ c
res.reorder_edges(edge_order)
res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
res_copy.reorder_edges([edge_dict[e] for e in edge_order])
np.testing.assert_allclose(res.tensor, res_copy.tensor)
def test_add_node_names(backend):
a = tn.Node(np.eye(2), "a", axis_names=["e0", "e1"], backend=backend)
assert a.name == "a"
assert a[0].name == "e0"
assert a[1].name == "e1"
def test_add_copy_node_from_node_object(backend):
a = tn.CopyNode(
3, 3, name="TestName", axis_names=['a', 'b', 'c'], backend=backend)
assert a.shape == (3, 3, 3)
assert isinstance(a, tn.CopyNode)
assert a.name == "TestName"
assert a.axis_names == ['a', 'b', 'c']
b = tn.Node(np.eye(3), backend=backend)
e = a[0] ^ b[0]
c = tn.contract(e)
np.testing.assert_allclose(c.tensor, a.tensor)
def test_copy_node_method(backend):
a = tn.Node(
np.ones([3, 3, 3]),
name='mynode',
axis_names=['a', 'b', 'c'],
backend=backend)
a.add_edge(tn.Edge(a, 0, name='named_edge1'), 0)
a.add_edge(tn.Edge(a, 1, name='named_edge2'), 1)
a.add_edge(tn.Edge(a, 2, name='named_edge3'), 2)
b = a.copy()
assert a.name == b.name
assert a.shape == b.shape
assert a.axis_names == b.axis_names
for i in range(len(a.edges)):
assert a[i].name == b[i].name
np.testing.assert_allclose(a.tensor, b.tensor)
def test_copy_copynode_method(backend):
a = tn.CopyNode(3, 3, 'mynode', axis_names=['a', 'b', 'c'], backend=backend)
a.add_edge(tn.Edge(a, 0, name='named_edge1'), 0)
a.add_edge(tn.Edge(a, 1, name='named_edge2'), 1)
a.add_edge(tn.Edge(a, 2, name='named_edge3'), 2)
b = a.copy()
assert a.name == b.name
assert a.shape == b.shape
assert a.axis_names == b.axis_names
assert a.rank == b.rank
assert a.backend == b.backend
assert a.dtype == b.dtype
for i in range(len(a.edges)):
assert a[i].name == b[i].name
np.testing.assert_allclose(a.tensor, b.tensor)
def test_copy_method_with_trace_edges(backend):
a = tn.Node(
np.ones([3, 3, 3, 3, 3]),
name='mynode',
axis_names=['a', 'b', 'c', 'd', 'e'],
backend=backend)
a.add_edge(tn.Edge(a, 0, name='named_edge1'), 0)
a.add_edge(tn.Edge(a, 1, name='named_edge2'), 1)
a.add_edge(tn.Edge(a, 2, name='named_edge3'), 2)
a.add_edge(tn.Edge(a, 3, name='named_edge4'), 3)
a.add_edge(tn.Edge(a, 4, name='named_edge5'), 4)
a[0] ^ a[3]
a[1] ^ a[4]
b = a.copy()
assert a.name == b.name
assert a.shape == b.shape
assert a.axis_names == b.axis_names
for i in range(len(a.edges)):
assert a[i].name == b[i].name
assert b[0] is b[3]
assert b[1] is b[4]
np.testing.assert_allclose(a.tensor, b.tensor)
def test_default_names_add_node_object(backend):
a = tn.CopyNode(3, 3, backend=backend)
assert a.name is not None
assert len(a.axis_names) == 3
def test_set_tensor(backend):
a = tn.Node(np.ones(2), backend=backend)
np.testing.assert_allclose(a.tensor, np.ones(2))
a.tensor = np.zeros(2)
np.testing.assert_allclose(a.tensor, np.zeros(2))
def test_has_nondangling_edge(backend):
a = tn.Node(np.ones(2), backend=backend)
assert not a.has_nondangling_edge()
b = tn.Node(np.ones((2, 2)), backend=backend)
tn.connect(b[0], b[1])
assert b.has_nondangling_edge()
def test_large_nodes(backend):
a = tn.Node(np.zeros([5, 6, 7, 8, 9]), "a", backend=backend)
b = tn.Node(np.zeros([5, 6, 7, 8, 9]), "b", backend=backend)
for i in range(5):
tn.connect(a[i], b[i])
tn.check_correct({a, b})
def test_small_matmul(backend):
a = tn.Node(np.zeros([10, 10]), name="a", backend=backend)
b = tn.Node(np.zeros([10, 10]), name="b", backend=backend)
edge = tn.connect(a[0], b[0], "edge")
tn.check_correct({a, b})
c = tn.contract(edge, name="a * b")
assert list(c.shape) == [10, 10]
tn.check_correct({c})
def test_double_trace(backend):
a = tn.Node(np.ones([10, 10, 10, 10]), name="a", backend=backend)
edge1 = tn.connect(a[0], a[1], "edge1")
edge2 = tn.connect(a[2], a[3], "edge2")
tn.check_correct({a})
val = tn.contract(edge1)
tn.check_correct({val})
val = tn.contract(edge2)
tn.check_correct({val})
np.testing.assert_allclose(val.tensor, 100.0)
def test_indirect_trace(backend):
a = tn.Node(np.ones([10, 10]), name="a", backend=backend)
edge = tn.connect(a[0], a[1], "edge")
tn.check_correct({a})
val = tn.contract(edge)
tn.check_correct({val})
np.testing.assert_allclose(val.tensor, 10.0)
def test_real_physics(backend):
  # Calculate the expected value in numpy
a_vals = np.ones([2, 3, 4, 5])
b_vals = np.ones([4, 6, 7])
c_vals = np.ones([5, 6, 8])
contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])
contract2 = np.tensordot(c_vals, contract1, [[0], [2]])
final_result = np.trace(contract2, axis1=0, axis2=4)
# Build the network
a = tn.Node(a_vals, name="T", backend=backend)
b = tn.Node(b_vals, name="A", backend=backend)
c = tn.Node(c_vals, name="B", backend=backend)
e1 = tn.connect(a[2], b[0], "edge")
e2 = tn.connect(c[0], a[3], "edge2")
e3 = tn.connect(b[1], c[1], "edge3")
tn.check_correct(tn.reachable(a))
node_result = tn.contract(e1)
np.testing.assert_allclose(node_result.tensor, contract1)
tn.check_correct(tn.reachable(node_result))
node_result = tn.contract(e2)
np.testing.assert_allclose(node_result.tensor, contract2)
tn.check_correct(tn.reachable(node_result))
val = tn.contract(e3)
tn.check_correct(tn.reachable(val))
np.testing.assert_allclose(val.tensor, final_result)
def test_real_physics_with_tensors(backend):
  # Calculate the expected value in numpy
a_vals = np.ones([2, 3, 4, 5])
b_vals = np.ones([4, 6, 7])
c_vals = np.ones([5, 6, 8])
contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])
contract2 = np.tensordot(c_vals, contract1, [[0], [2]])
final_result = np.trace(contract2, axis1=0, axis2=4)
# Build the network
a = tn.Node(np.ones([2, 3, 4, 5]), name="T", backend=backend)
b = tn.Node(np.ones([4, 6, 7]), name="A", backend=backend)
c = tn.Node(np.ones([5, 6, 8]), name="B", backend=backend)
e1 = tn.connect(a[2], b[0], "edge")
e2 = tn.connect(c[0], a[3], "edge2")
e3 = tn.connect(b[1], c[1], "edge3")
tn.check_correct({a, b, c})
node_result = tn.contract(e1)
np.testing.assert_allclose(node_result.tensor, contract1)
tn.check_correct(tn.reachable(node_result))
node_result = tn.contract(e2)
np.testing.assert_allclose(node_result.tensor, contract2)
tn.check_correct(tn.reachable(node_result))
val = tn.contract(e3)
tn.check_correct(tn.reachable(val))
np.testing.assert_allclose(val.tensor, final_result)
def test_real_physics_naive_contraction(backend):
  # Calculate the expected value in numpy
a_vals = np.ones([2, 3, 4, 5])
b_vals = np.ones([4, 6, 7])
c_vals = np.ones([5, 6, 8])
contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])
contract2 = np.tensordot(c_vals, contract1, [[0], [2]])
final_result = np.trace(contract2, axis1=0, axis2=4)
# Build the network
a = tn.Node(np.ones([2, 3, 4, 5]), name="T", backend=backend)
b = tn.Node(np.ones([4, 6, 7]), name="A", backend=backend)
c = tn.Node(np.ones([5, 6, 8]), name="B", backend=backend)
e1 = tn.connect(a[2], b[0], "edge")
e2 = tn.connect(c[0], a[3], "edge2")
e3 = tn.connect(b[1], c[1], "edge3")
for edge in [e1, e2, e3]:
val = tn.contract(edge)
assert list(val.shape) == [8, 2, 3, 7]
np.testing.assert_allclose(val.tensor, final_result)
def test_with_tensors(backend):
a = tn.Node(np.eye(2) * 2, name="T", backend=backend)
b = tn.Node(np.eye(2) * 3, name="A", backend=backend)
e1 = tn.connect(a[0], b[0], "edge")
e2 = tn.connect(a[1], b[1], "edge2")
tn.check_correct({a, b})
result = tn.contract(e1)
tn.check_correct(tn.reachable(result))
val = tn.contract(e2)
tn.check_correct(tn.reachable(val))
np.testing.assert_allclose(val.tensor, 12.0)
def test_node2_contract_trace(backend):
a = tn.Node(np.zeros([3, 3, 1]), backend=backend)
b = tn.Node(np.zeros([1]), backend=backend)
tn.connect(b[0], a[2])
trace_edge = tn.connect(a[0], a[1])
c = tn.contract(trace_edge)
tn.check_correct({c})
def test_node_get_dim_bad_axis(backend):
node = tn.Node(np.eye(2), name="a", axis_names=["1", "2"], backend=backend)
with pytest.raises(ValueError):
node.get_dimension(10)
def test_named_axis(backend):
a = tn.Node(np.eye(2), axis_names=["alpha", "beta"], backend=backend)
e = tn.connect(a["alpha"], a["beta"])
b = tn.contract(e)
np.testing.assert_allclose(b.tensor, 2.0)
def test_mixed_named_axis(backend):
a = tn.Node(np.eye(2) * 2.0, axis_names=["alpha", "beta"], backend=backend)
b = tn.Node(np.eye(2) * 3.0, backend=backend)
e1 = tn.connect(a["alpha"], b[0])
# Axes should still be indexable by numbers even with naming.
e2 = tn.connect(a[1], b[1])
tn.contract(e1)
result = tn.contract(e2)
np.testing.assert_allclose(result.tensor, 12.0)
def test_duplicate_name(backend):
with pytest.raises(ValueError):
tn.Node(np.eye(2), axis_names=["test", "test"], backend=backend)
def test_bad_axis_name_length(backend):
with pytest.raises(ValueError):
# This should have 2 names, not 1.
tn.Node(np.eye(2), axis_names=["need_2_names"], backend=backend)
def test_bad_axis_name_connect(backend):
a = tn.Node(np.eye(2), axis_names=["test", "names"], backend=backend)
with pytest.raises(ValueError):
a.get_edge("bad_name")
def test_node_edge_ordering(backend):
a = tn.Node(np.zeros((2, 3, 4)), backend=backend)
e2 = a[0]
e3 = a[1]
e4 = a[2]
assert a.shape == (2, 3, 4)
a.reorder_edges([e4, e2, e3])
tn.check_correct({a})
assert a.shape == (4, 2, 3)
assert e2.axis1 == 1
assert e3.axis1 == 2
assert e4.axis1 == 0
def test_trace_edge_ordering(backend):
a = tn.Node(np.zeros((2, 2, 3)), backend=backend)
e2 = tn.connect(a[1], a[0])
e3 = a[2]
with pytest.raises(ValueError):
a.reorder_edges([e2, e3])
def test_mismatch_edge_ordering(backend):
a = tn.Node(np.zeros((2, 3)), backend=backend)
e2_a = a[0]
b = tn.Node(np.zeros((2,)), backend=backend)
e_b = b[0]
with pytest.raises(ValueError):
a.reorder_edges([e2_a, e_b])
def test_complicated_edge_reordering(backend):
a = tn.Node(np.zeros((2, 3, 4)), backend=backend)
b = tn.Node(np.zeros((2, 5)), backend=backend)
c = tn.Node(np.zeros((3,)), backend=backend)
d = tn.Node(np.zeros((4, 5)), backend=backend)
e_ab = tn.connect(a[0], b[0])
e_bd = tn.connect(b[1], d[1])
e_ac = tn.connect(a[1], c[0])
e_ad = tn.connect(a[2], d[0])
result = tn.contract(e_bd)
a.reorder_edges([e_ac, e_ab, e_ad])
tn.check_correct(tn.reachable(result))
assert a.shape == (3, 2, 4)
def test_edge_reorder_axis_names(backend):
a = tn.Node(
np.zeros((2, 3, 4, 5)), axis_names=["a", "b", "c", "d"], backend=backend)
edge_a = a["a"]
edge_b = a["b"]
edge_c = a["c"]
edge_d = a["d"]
a.reorder_edges([edge_c, edge_b, edge_d, edge_a])
assert a.shape == (4, 3, 5, 2)
assert a.axis_names == ["c", "b", "d", "a"]
def test_add_axis_names(backend):
a = tn.Node(
np.eye(2), name="A", axis_names=["ignore1", "ignore2"], backend=backend)
a.add_axis_names(["a", "b"])
assert a.axis_names == ["a", "b"]
def test_reorder_axes(backend):
a = tn.Node(np.zeros((2, 3, 4)), backend=backend)
b = tn.Node(np.zeros((3, 4, 5)), backend=backend)
c = tn.Node(np.zeros((2, 4, 5)), backend=backend)
tn.connect(a[0], c[0])
tn.connect(b[0], a[1])
tn.connect(a[2], c[1])
tn.connect(b[2], c[2])
a.reorder_axes([2, 0, 1])
tn.check_correct({a, b, c})
assert a.shape == (4, 2, 3)
def test_reorder_axes_raises_error_no_tensor(backend):
a = tn.Node(np.zeros((2, 3, 4)), backend=backend)
del a._tensor
with pytest.raises(AttributeError) as e:
a.reorder_axes([2, 0, 1])
assert "Please provide a valid tensor for this Node." in str(e.value)
def test_reorder_axes_raises_error_bad_permutation(backend):
a = tn.Node(np.zeros((2, 3, 4)), backend=backend)
with pytest.raises(ValueError) as e:
a.reorder_axes([2, 0])
assert "A full permutation was not passed." in str(e.value)
def test_flatten_consistent_result(backend):
a_val = np.ones((3, 5, 5, 6))
b_val = np.ones((5, 6, 4, 5))
# Create non flattened example to compare against.
a_noflat = tn.Node(a_val, backend=backend)
b_noflat = tn.Node(b_val, backend=backend)
e1 = tn.connect(a_noflat[1], b_noflat[3])
e2 = tn.connect(a_noflat[3], b_noflat[1])
e3 = tn.connect(a_noflat[2], b_noflat[0])
a_dangling_noflat = a_noflat[0]
b_dangling_noflat = b_noflat[2]
for edge in [e1, e2, e3]:
noflat_result_node = tn.contract(edge)
noflat_result_node.reorder_edges([a_dangling_noflat, b_dangling_noflat])
noflat_result = noflat_result_node.tensor
# Create network with flattening
a_flat = tn.Node(a_val, backend=backend)
b_flat = tn.Node(b_val, backend=backend)
e1 = tn.connect(a_flat[1], b_flat[3])
e2 = tn.connect(a_flat[3], b_flat[1])
e3 = tn.connect(a_flat[2], b_flat[0])
a_dangling_flat = a_flat[0]
b_dangling_flat = b_flat[2]
final_edge = tn.flatten_edges([e1, e2, e3])
flat_result_node = tn.contract(final_edge)
flat_result_node.reorder_edges([a_dangling_flat, b_dangling_flat])
flat_result = flat_result_node.tensor
np.testing.assert_allclose(flat_result, noflat_result)
def test_flatten_consistent_tensor(backend):
a_val = np.ones((2, 3, 4, 5))
b_val = np.ones((3, 5, 4, 2))
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
e1 = tn.connect(a[0], b[3])
e2 = tn.connect(b[1], a[3])
e3 = tn.connect(a[1], b[0])
tn.flatten_edges([e3, e1, e2])
tn.check_correct({a, b})
# Check expected values.
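  # flatten_edges keeps each node's dangling axis in front and merges the three
  # connected axes, in the order [e3, e1, e2], into one index of size
  # 3 * 2 * 5 = 30 -- hence the transpose/reshape below.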
a_final = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))
b_final = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))
np.testing.assert_allclose(a.tensor, a_final)
np.testing.assert_allclose(b.tensor, b_final)
def test_flatten_trace_consistent_result(backend):
a_val = np.ones((5, 6, 6, 7, 5, 7))
a_noflat = tn.Node(a_val, backend=backend)
e1 = tn.connect(a_noflat[0], a_noflat[4])
e2 = tn.connect(a_noflat[1], a_noflat[2])
e3 = tn.connect(a_noflat[3], a_noflat[5])
for edge in [e1, e2, e3]:
noflat_result = tn.contract(edge).tensor
# Create network with flattening
  a_flat = tn.Node(a_val, backend=backend)
e1 = tn.connect(a_flat[0], a_flat[4])
e2 = tn.connect(a_flat[1], a_flat[2])
e3 = tn.connect(a_flat[3], a_flat[5])
final_edge = tn.flatten_edges([e1, e2, e3])
flat_result = tn.contract(final_edge).tensor
np.testing.assert_allclose(flat_result, noflat_result)
def test_flatten_trace_consistent_tensor(backend):
a_val = np.ones((5, 3, 4, 4, 5))
a = tn.Node(a_val, backend=backend)
e1 = tn.connect(a[0], a[4])
e2 = tn.connect(a[3], a[2])
tn.flatten_edges([e2, e1])
tn.check_correct({a})
# Check expected values.
a_final = np.reshape(np.transpose(a_val, (1, 2, 0, 3, 4)), (3, 20, 20))
np.testing.assert_allclose(a.tensor, a_final)
def test_contract_between_output_order(backend):
a_val = np.ones((2, 3, 4, 5))
b_val = np.ones((3, 5, 4, 2))
c_val = np.ones((2, 2))
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
c = tn.Node(c_val, backend=backend)
tn.connect(a[0], b[3])
tn.connect(b[1], a[3])
tn.connect(a[1], b[0])
with pytest.raises(ValueError):
d = tn.contract_between(
a, b, name="New Node", output_edge_order=[a[2], b[2], a[0]])
tn.check_correct({a, b, c}, check_connections=False)
with pytest.raises(ValueError):
d = tn.contract_between(
a, b, name="New Node", output_edge_order=[a[2], b[2], c[0]])
tn.check_correct({a, b, c}, check_connections=False)
d = tn.contract_between(a, b, name="New Node", output_edge_order=[b[2], a[2]])
tn.check_correct({c, d}, check_connections=False)
a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))
b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))
final_val = np.matmul(b_flat, a_flat.T)
np.testing.assert_allclose(d.tensor, final_val)
assert d.name == "New Node"
def test_contract_between_trace_edges(backend):
a_val = np.ones((3, 3))
final_val = np.trace(a_val)
a = tn.Node(a_val, backend=backend)
tn.connect(a[0], a[1])
b = tn.contract_between(a, a)
tn.check_correct({b})
np.testing.assert_allclose(b.tensor, final_val)
def test_disable_edges_complex(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
c = tn.Node(np.eye(2), backend=backend)
e1 = tn.connect(a[0], b[0])
e2 = tn.connect(b[1], c[0])
tn.contract(e1)
tn.contract(e2)
  # This now raises an exception because contraction disables the contracted
  # edges, so accessing their nodes raises a ValueError.
with pytest.raises(ValueError):
e1.node1
with pytest.raises(ValueError):
e2.node1
def test_edge_disable_complex(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
c = tn.Node(np.eye(2), backend=backend)
e1 = tn.connect(a[0], b[0])
e2 = tn.connect(b[1], c[0])
tn.contract(e1)
tn.contract(e2)
  # This now raises an exception because contraction disables the contracted
  # edges, so accessing their nodes raises a ValueError.
with pytest.raises(ValueError):
e1.node1
# This raises an exception since the intermediate node created when doing
# `tn.contract(e2)` was garbage collected.
with pytest.raises(ValueError):
e2.node1
def test_set_node2(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
e = tn.connect(a[0], b[0])
# You should never do this, but if you do, we should handle
# it gracefully.
e.node2 = None
assert e.is_dangling()
def test_set_default(backend):
tn.set_default_backend(backend)
assert _default_backend_stack.default_backend == backend
a = tn.Node(np.eye(2))
assert a.backend.name == backend
def test_bad_backend_name():
with pytest.raises(ValueError, match="Backend 'BAD_NAME' was not found."):
tn.set_default_backend("BAD_NAME")
def test_copy_tensor(backend):
a = tn.Node(np.array([1, 2, 3], dtype=np.float64), backend=backend)
b = tn.Node(np.array([10, 20, 30], dtype=np.float64), backend=backend)
c = tn.Node(np.array([5, 6, 7], dtype=np.float64), backend=backend)
d = tn.Node(np.array([1, -1, 1], dtype=np.float64), backend=backend)
cn = tn.CopyNode(rank=4, dimension=3, backend=backend)
edge1 = tn.connect(a[0], cn[0])
edge2 = tn.connect(b[0], cn[1])
edge3 = tn.connect(c[0], cn[2])
edge4 = tn.connect(d[0], cn[3])
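  # The copy node acts as a Kronecker delta, so the full contraction is the
  # elementwise sum over the shared index:
  # 1*10*5*1 + 2*20*6*(-1) + 3*30*7*1 = 50 - 240 + 630 = 440.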
result = cn.compute_contracted_tensor()
assert list(result.shape) == []
np.testing.assert_allclose(result, 50 - 240 + 630)
for edge in [edge1, edge2, edge3, edge4]:
val = tn.contract(edge)
result = val.tensor
assert list(result.shape) == []
np.testing.assert_allclose(result, 50 - 240 + 630)
# Include 'tensorflow' (by removing the decorator) once #87 is fixed.
@pytest.mark.parametrize('backend', ('numpy', 'jax'))
def test_copy_tensor_parallel_edges(backend):
a = tn.Node(np.diag([1., 2, 3]), backend=backend)
b = tn.Node(np.array([10, 20, 30], dtype=np.float64), backend=backend)
cn = tn.CopyNode(rank=3, dimension=3, backend=backend)
edge1 = tn.connect(a[0], cn[0])
edge2 = tn.connect(a[1], cn[1])
edge3 = tn.connect(b[0], cn[2])
result = cn.compute_contracted_tensor()
assert list(result.shape) == []
np.testing.assert_allclose(result, 10 + 40 + 90)
for edge in [edge1, edge2, edge3]:
val = tn.contract(edge)
result = val.tensor
assert list(result.shape) == []
np.testing.assert_allclose(result, 10 + 40 + 90)
def test_contract_copy_node_connected_neighbors(backend):
a = tn.Node(np.array([[1, 2, 3], [10, 20, 30]]), backend=backend)
b = tn.Node(np.array([[2, 1, 1], [2, 2, 2]]), backend=backend)
c = tn.Node(np.array([3, 4, 4]), backend=backend)
cn = tn.CopyNode(rank=3, dimension=3, backend=backend)
tn.connect(a[0], b[0])
tn.connect(a[1], cn[0])
tn.connect(b[1], cn[1])
tn.connect(c[0], cn[2])
n = tn.contract_copy_node(cn)
assert len(n.edges) == 2
assert n.edges[0] == n.edges[1]
val = tn.contract_parallel(n.edges[0])
result = val.tensor
assert list(result.shape) == []
np.testing.assert_allclose(result, 26 + 460)
def test_bad_backend():
with pytest.raises(ValueError):
tn.Node(np.eye(2), backend="BAD_BACKEND_NAME")
def test_remove_node(backend):
a = tn.Node(
np.ones((2, 2, 2)),
axis_names=["test", "names", "ignore"],
backend=backend)
b = tn.Node(np.ones((2, 2)), backend=backend)
c = tn.Node(np.ones((2, 2)), backend=backend)
tn.connect(a["test"], b[0])
tn.connect(a[1], c[0])
broken_edges_name, broken_edges_axis = tn.remove_node(a)
assert "test" in broken_edges_name
assert broken_edges_name["test"] is b[0]
assert "names" in broken_edges_name
assert broken_edges_name["names"] is c[0]
assert "ignore" not in broken_edges_name
assert 0 in broken_edges_axis
assert 1 in broken_edges_axis
assert 2 not in broken_edges_axis
assert broken_edges_axis[0] is b[0]
assert broken_edges_axis[1] is c[0]
def test_remove_node_trace_edge(backend):
a = tn.Node(np.ones((2, 2, 2)), backend=backend)
b = tn.Node(np.ones(2), backend=backend)
tn.connect(a[0], b[0])
tn.connect(a[1], a[2])
_, broken_edges = tn.remove_node(a)
assert 0 in broken_edges
assert 1 not in broken_edges
assert 2 not in broken_edges
assert broken_edges[0] is b[0]
def test_at_operator(backend):
a = tn.Node(np.ones((2,)), backend=backend)
b = tn.Node(np.ones((2,)), backend=backend)
tn.connect(a[0], b[0])
c = a @ b
assert isinstance(c, tn.Node)
np.testing.assert_allclose(c.tensor, 2.0)
def test_connect_alias(backend):
a = tn.Node(np.ones((2, 2)), backend=backend)
b = tn.Node(np.ones((2, 2)), backend=backend)
e = a[0] ^ b[0]
assert set(e.get_nodes()) == {a, b}
assert e is a[0]
assert e is b[0]
def test_remove_after_flatten(backend):
a = tn.Node(np.ones((2, 2)), backend=backend)
b = tn.Node(np.ones((2, 2)), backend=backend)
tn.connect(a[0], b[0])
tn.connect(a[1], b[1])
tn.flatten_all_edges({a, b})
tn.remove_node(a)
def test_custom_backend():
# pylint: disable=abstract-method
class StringBackend(tn.AbstractBackend):
def __init__(self):
super().__init__()
self.name = "string_backend"
def tensordot(self, a, b, axes):
return a + b
def convert_to_tensor(self, tensor):
return tensor
def shape_tuple(self, tensor):
return (1,)
backend = StringBackend()
assert isinstance(backend, tn.AbstractBackend)
a = tn.Node("Hello ", backend=backend)
b = tn.Node("world!", backend=backend)
a[0] ^ b[0]
c = a @ b
assert c.tensor == "Hello world!"
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import json
import fs.path
import instalooter.looters
_test_dir = os.path.abspath(os.path.join(__file__, ".."))
_url = "tar://{}/ig_mock.tar.gz".format(_test_dir)
def get_mock_fs():
return fs.open_fs(_url)
class MockPages(object):
def __init__(self, profile):
self.profile = profile
def __call__(self):
with get_mock_fs() as mockfs:
with mockfs.open("pages/{}".format(self.profile)) as f:
return iter(json.load(f))
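# Hedged usage sketch (not part of the original module): how a consuming test
# might point a looter at the cached pages instead of the live API. The
# attribute being replaced is an assumption about how the tests patch looters.
def use_cached_pages(looter, profile):
    """Replace the looter's ``pages`` callable with the archived MockPages."""
    looter.pages = MockPages(profile)
    return looter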
if __name__ == "__main__":
with fs.open_fs(_test_dir) as test_fs:
if test_fs.exists(fs.path.basename(_url)):
test_fs.remove(fs.path.basename(_url))
with fs.open_fs(_url, create=True) as mockfs:
mockfs.makedir("pages", recreate=True)
nintendo = instalooter.looters.ProfileLooter("nintendo")
with mockfs.open("pages/nintendo", "w") as f:
json.dump(list(nintendo.pages()), f)
fluoxetine = instalooter.looters.HashtagLooter("fluoxetine")
with mockfs.open("pages/fluoxetine", "w") as f:
            pages_it = fluoxetine.pages()
json.dump([next(pages_it) for _ in range(3)], f)
|