import pytest
from tests.async_mock import patch
@pytest.fixture()
def ping_client():
"""Define a patched client that returns a successful ping response."""
with patch(
"homeassistant.components.guardian.async_setup_entry", return_value=True
), patch("aioguardian.client.Client.connect"), patch(
"aioguardian.commands.system.SystemCommands.ping",
return_value={"command": 0, "status": "ok", "data": {"uid": "ABCDEF123456"}},
), patch(
"aioguardian.client.Client.disconnect"
):
yield
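# A minimal sketch of a test that leans on the fixture above; the test body,
# the IP address and the assertion are illustrative and not taken from the
# guardian test suite. With ``ping_client`` active, the aioguardian calls are
# patched out, so no UDP traffic is attempted and ping() returns the canned
# payload defined in the fixture.
async def test_ping_is_patched(ping_client):
    """Ensure the patched client returns the stubbed ping response."""
    from aioguardian import Client

    client = Client("192.168.1.100")
    await client.connect()  # patched: no socket is opened
    response = await client.system.ping()  # patched: returns the canned dict
    assert response["data"]["uid"] == "ABCDEF123456"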
|
from PyQt5.QtCore import QDir, Qt
from PyQt5.QtGui import QDesktopServices, QGuiApplication
from PyQt5.QtWidgets import QTextBrowser
from ReText import globalSettings
class ReTextPreview(QTextBrowser):
def __init__(self, tab):
QTextBrowser.__init__(self)
self.tab = tab
# if set to True, links to other files would be opened as anchors, which fails
self.setOpenLinks(False)
self.anchorClicked.connect(self.openInternal)
def disconnectExternalSignals(self):
pass
def openInternal(self, link):
url = link.url()
isLocalHtml = (link.scheme() in ('file', '') and url.endswith('.html'))
if url.startswith('#'):
self.scrollToAnchor(url[1:])
return
elif link.isRelative():
fileToOpen = QDir.current().filePath(url)
if self.tab.openSourceFile(fileToOpen):
return
if globalSettings.handleWebLinks and isLocalHtml:
self.setSource(link)
else:
QDesktopServices.openUrl(link)
class ReTextWebPreview:
"""This is a common class shared between WebKit and WebEngine
based previews."""
def __init__(self, editBox):
self.editBox = editBox
self.settings().setDefaultTextEncoding('utf-8')
# Events relevant to sync scrolling
self.editBox.cursorPositionChanged.connect(self._handleCursorPositionChanged)
self.editBox.verticalScrollBar().valueChanged.connect(self.syncscroll.handleEditorScrolled)
self.editBox.resized.connect(self._handleEditorResized)
# Scroll the preview when the mouse wheel is used to scroll
# beyond the beginning/end of the editor
self.editBox.scrollLimitReached.connect(self._handleWheelEvent)
def disconnectExternalSignals(self):
self.editBox.cursorPositionChanged.disconnect(self._handleCursorPositionChanged)
self.editBox.verticalScrollBar().valueChanged.disconnect(self.syncscroll.handleEditorScrolled)
self.editBox.resized.disconnect(self._handleEditorResized)
self.editBox.scrollLimitReached.disconnect(self._handleWheelEvent)
def _handleCursorPositionChanged(self):
editorCursorPosition = self.editBox.verticalScrollBar().value() + \
self.editBox.cursorRect().top()
self.syncscroll.handleCursorPositionChanged(editorCursorPosition)
def _handleEditorResized(self, rect):
self.syncscroll.handleEditorResized(rect.height())
def wheelEvent(self, event):
if QGuiApplication.keyboardModifiers() == Qt.ControlModifier:
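# Worked example for the zoom step below (values assumed, not from ReText):
# one standard wheel notch reports angleDelta().y() == ±120, so each notch
# scales the zoom factor by about 1.001 ** 120 ≈ 1.13, i.e. roughly 13%,
# and a negative delta divides by the same amount.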
zoomFactor = self.zoomFactor()
zoomFactor *= 1.001 ** event.angleDelta().y()
self.setZoomFactor(zoomFactor)
return super().wheelEvent(event)
|
import tempfile
import pytest
import t.skip
from kombu import Connection, Exchange, Queue, Consumer, Producer
@t.skip.if_win32
class test_FilesystemTransport:
def setup(self):
self.channels = set()
try:
data_folder_in = tempfile.mkdtemp()
data_folder_out = tempfile.mkdtemp()
except Exception:
pytest.skip('filesystem transport: cannot create tempfiles')
self.c = Connection(transport='filesystem',
transport_options={
'data_folder_in': data_folder_in,
'data_folder_out': data_folder_out,
})
self.channels.add(self.c.default_channel)
self.p = Connection(transport='filesystem',
transport_options={
'data_folder_in': data_folder_out,
'data_folder_out': data_folder_in,
})
self.channels.add(self.p.default_channel)
self.e = Exchange('test_transport_filesystem')
self.q = Queue('test_transport_filesystem',
exchange=self.e,
routing_key='test_transport_filesystem')
self.q2 = Queue('test_transport_filesystem2',
exchange=self.e,
routing_key='test_transport_filesystem2')
def teardown(self):
# make sure we don't attempt to restore messages at shutdown.
for channel in self.channels:
try:
channel._qos._dirty.clear()
except AttributeError:
pass
try:
channel._qos._delivered.clear()
except AttributeError:
pass
def _add_channel(self, channel):
self.channels.add(channel)
return channel
def test_produce_consume_noack(self):
producer = Producer(self._add_channel(self.p.channel()), self.e)
consumer = Consumer(self._add_channel(self.c.channel()), self.q,
no_ack=True)
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
_received = []
def callback(message_data, message):
_received.append(message)
consumer.register_callback(callback)
consumer.consume()
while 1:
if len(_received) == 10:
break
self.c.drain_events()
assert len(_received) == 10
def test_produce_consume(self):
producer_channel = self._add_channel(self.p.channel())
consumer_channel = self._add_channel(self.c.channel())
producer = Producer(producer_channel, self.e)
consumer1 = Consumer(consumer_channel, self.q)
consumer2 = Consumer(consumer_channel, self.q2)
self.q2(consumer_channel).declare()
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem2')
_received1 = []
_received2 = []
def callback1(message_data, message):
_received1.append(message)
message.ack()
def callback2(message_data, message):
_received2.append(message)
message.ack()
consumer1.register_callback(callback1)
consumer2.register_callback(callback2)
consumer1.consume()
consumer2.consume()
while 1:
if len(_received1) + len(_received2) == 20:
break
self.c.drain_events()
assert len(_received1) + len(_received2) == 20
# compression
producer.publish({'compressed': True},
routing_key='test_transport_filesystem',
compression='zlib')
m = self.q(consumer_channel).get()
assert m.payload == {'compressed': True}
# queue.delete
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
assert self.q(consumer_channel).get()
self.q(consumer_channel).delete()
self.q(consumer_channel).declare()
assert self.q(consumer_channel).get() is None
# queue.purge
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem2')
assert self.q2(consumer_channel).get()
self.q2(consumer_channel).purge()
assert self.q2(consumer_channel).get() is None
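# A standalone sketch of the wiring the tests above rely on (not part of the
# test suite; the directory names, queue name and message body are
# illustrative): two Connections point at the same pair of directories with
# 'data_folder_in' and 'data_folder_out' swapped, so one side's outbox is the
# other side's inbox. It reuses the imports at the top of this module.
def _filesystem_transport_demo():
    data_in, data_out = tempfile.mkdtemp(), tempfile.mkdtemp()
    exchange = Exchange('demo')
    queue = Queue('demo', exchange=exchange, routing_key='demo')
    received = []

    producer_conn = Connection(
        transport='filesystem',
        transport_options={'data_folder_in': data_out, 'data_folder_out': data_in})
    consumer_conn = Connection(
        transport='filesystem',
        transport_options={'data_folder_in': data_in, 'data_folder_out': data_out})

    def on_message(body, message):
        received.append(body)
        message.ack()

    # Creating the Consumer declares and binds the queue (auto_declare=True).
    consumer = Consumer(consumer_conn.channel(), queue)
    consumer.register_callback(on_message)
    consumer.consume()
    Producer(producer_conn.channel(), exchange).publish(
        {'hello': 'world'}, routing_key='demo')
    consumer_conn.drain_events(timeout=2)
    return received  # [{'hello': 'world'}]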
|
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_SOURCE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_SOURCE_ID = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT = "unit"
CONF_TIME_WINDOW = "time_window"
# SI Metric prefixes
UNIT_PREFIXES = {
None: 1,
"n": 1e-9,
"µ": 1e-6,
"m": 1e-3,
"k": 1e3,
"M": 1e6,
"G": 1e9,
"T": 1e12,
}
# Time units, expressed in seconds
UNIT_TIME = {
TIME_SECONDS: 1,
TIME_MINUTES: 60,
TIME_HOURS: 60 * 60,
TIME_DAYS: 24 * 60 * 60,
}
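# Worked example of how these factors combine in calc_derivative() below
# (numbers are illustrative): a source reporting Wh that rises by 500 over
# 300 s, configured with unit_prefix "k" and unit_time TIME_HOURS, yields
# 500 / 300 / 1e3 * 3600 = 6.0, i.e. a derivative of 6.0 kWh/h.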
ICON = "mdi:chart-line"
DEFAULT_ROUND = 3
DEFAULT_TIME_WINDOW = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SOURCE): cv.entity_id,
vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
vol.Optional(CONF_UNIT): cv.string,
vol.Optional(CONF_TIME_WINDOW, default=DEFAULT_TIME_WINDOW): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the derivative sensor."""
derivative = DerivativeSensor(
source_entity=config[CONF_SOURCE],
name=config.get(CONF_NAME),
round_digits=config[CONF_ROUND_DIGITS],
unit_prefix=config[CONF_UNIT_PREFIX],
unit_time=config[CONF_UNIT_TIME],
unit_of_measurement=config.get(CONF_UNIT),
time_window=config[CONF_TIME_WINDOW],
)
async_add_entities([derivative])
class DerivativeSensor(RestoreEntity):
"""Representation of an derivative sensor."""
def __init__(
self,
source_entity,
name,
round_digits,
unit_prefix,
unit_time,
unit_of_measurement,
time_window,
):
"""Initialize the derivative sensor."""
self._sensor_source_id = source_entity
self._round_digits = round_digits
self._state = 0
self._state_list = [] # List of tuples with (timestamp, sensor_value)
self._name = name if name is not None else f"{source_entity} derivative"
if unit_of_measurement is None:
final_unit_prefix = "" if unit_prefix is None else unit_prefix
self._unit_template = f"{final_unit_prefix}{{}}/{unit_time}"
# we postpone the definition of unit_of_measurement to later
self._unit_of_measurement = None
else:
self._unit_of_measurement = unit_of_measurement
self._unit_prefix = UNIT_PREFIXES[unit_prefix]
self._unit_time = UNIT_TIME[unit_time]
self._time_window = time_window.total_seconds()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state is not None:
try:
self._state = Decimal(state.state)
except (ValueError, DecimalException) as err:
_LOGGER.warning("Could not restore last state: %s", err)
@callback
def calc_derivative(event):
"""Handle the sensor state changes."""
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
if (
old_state is None
or old_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
or new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
):
return
now = new_state.last_updated
# Filter out the tuples that are older than (and outside of) the `time_window`
self._state_list = [
(timestamp, state)
for timestamp, state in self._state_list
if (now - timestamp).total_seconds() < self._time_window
]
# It can happen that the list is now empty, in that case
# we use the old_state, because we cannot do anything better.
if len(self._state_list) == 0:
self._state_list.append((old_state.last_updated, old_state.state))
self._state_list.append((new_state.last_updated, new_state.state))
if self._unit_of_measurement is None:
unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._unit_of_measurement = self._unit_template.format(
"" if unit is None else unit
)
try:
# derivative of previous measures.
last_time, last_value = self._state_list[-1]
first_time, first_value = self._state_list[0]
elapsed_time = (last_time - first_time).total_seconds()
delta_value = Decimal(last_value) - Decimal(first_value)
derivative = (
delta_value
/ Decimal(elapsed_time)
/ Decimal(self._unit_prefix)
* Decimal(self._unit_time)
)
assert isinstance(derivative, Decimal)
except ValueError as err:
_LOGGER.warning("While calculating derivative: %s", err)
except DecimalException as err:
_LOGGER.warning(
"Invalid state (%s > %s): %s", old_state.state, new_state.state, err
)
except AssertionError as err:
_LOGGER.error("Could not calculate derivative: %s", err)
else:
self._state = derivative
self.async_write_ha_state()
async_track_state_change_event(
self.hass, [self._sensor_source_id], calc_derivative
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return round(self._state, self._round_digits)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_SOURCE_ID: self._sensor_source_id}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
|
import weakref
import inspect
from ._loop import loop
from . import logger
def action(func):
""" Decorator to turn a method of a Component into an
:class:`Action <flexx.event.Action>`.
Actions change the state of the application by
:func:`mutating <flexx.event.Component._mutate>`
:class:`properties <flexx.event.Property>`.
In fact, properties can only be changed via actions.
Actions are asynchronous and thread-safe. Invoking an action will not
apply the changes directly; the action is queued and handled at a later
time. The one exception is that when an action is invoked from another
action, it is handled directly.
Although setting properties directly might seem nice, their use would mean
that the state of the application can change while the app is *reacting*
to changes in the state. This might be manageable for small applications,
but as an app grows this easily results in inconsistencies and bugs.
Separating actions (which modify state) and reactions (that react to it)
makes apps easier to understand and debug. This is the core idea behind
frameworks such as Elm, React and Vuex. And Flexx adopts it as well.
Example usage:
.. code-block:: py
class MyComponent(event.Component):
count = event.IntProp(0)
@action
def increase_counter(self):
self._mutate_count(self.count + 1) # call mutator function
"""
if not callable(func):
raise TypeError('The event.action() decorator needs a function.')
if getattr(func, '__self__', None) is not None: # builtin funcs have __self__
raise TypeError('Invalid use of action decorator.')
return ActionDescriptor(func, func.__name__, func.__doc__ or func.__name__)
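# Usage sketch for the decorator above, reusing the MyComponent example from
# its docstring (``loop`` is this module's event loop; the call sequence is
# illustrative):
#
#     c = MyComponent()
#     c.increase_counter()   # queued, not applied yet; c.count is still 0
#     loop.iter()            # process pending actions
#     assert c.count == 1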
class BaseDescriptor:
""" Base descriptor class for some commonalities.
"""
def __repr__(self):
t = '<%s %r (this should be a class attribute) at 0x%x>'
return t % (self.__class__.__name__, self._name, id(self))
def __set__(self, obj, value):
cname = self.__class__.__name__
cname = cname[:-10] if cname.endswith('Descriptor') else cname
raise AttributeError('Cannot overwrite %s %r.' % (cname, self._name))
def __delete__(self, obj):
cname = self.__class__.__name__
cname = cname[:-10] if cname.endswith('Descriptor') else cname
raise AttributeError('Cannot delete %s %r.' % (cname, self._name))
@staticmethod
def _format_doc(kind, name, doc, func=None):
prefix, betweenfix = '', ' '
doc = (doc or '').strip()
# Prevent Sphinx doing something weird when it sees a colon on first line
if doc.count('\n') and doc.split('\n')[0].strip().count(':'):
line2 = doc.split('\n')[1]
betweenfix = '\n' + ' ' * (len(line2) - len(line2.lstrip()))
if doc:
if func:
sig = str(inspect.signature(func))
sig = '(' + sig[5:].lstrip(', ') if sig.startswith('(self') else sig
prefix = '{}{}\n'.format(name, sig)
return '{}*{}* –{}{}\n'.format(prefix, kind, betweenfix, doc or name)
class ActionDescriptor(BaseDescriptor):
""" Class descriptor for actions.
"""
def __init__(self, func, name, doc):
self._func = func
self._name = name
self.__doc__ = self._format_doc('action', name, doc, func)
def __get__(self, instance, owner):
# Return Action object, which we cache on the instance
if instance is None:
return self
private_name = '_' + self._name + '_action'
try:
action = getattr(instance, private_name)
except AttributeError:
action = Action(instance, self._func, self._name, self.__doc__)
setattr(instance, private_name, action)
# Make the action use *our* func one time. In most situations
# this is the same function that the action has, but not when
# using super(); i.e. this allows an action to call the same
# action of its super class.
action._use_once(self._func)
return action
class Action:
""" Action objects are wrappers around Component methods. They take
care of queueing action invocations rather than calling the function
directly, unless the action is called from another action (in this
case it would be a direct call). This class should not be instantiated
directly; use ``event.action()`` instead.
"""
def __init__(self, ob, func, name, doc):
assert callable(func)
# Store func, name, and docstring (e.g. for sphinx docs)
self._ob1 = weakref.ref(ob)
self._func = func
self._func_once = func
self._name = name
self.__doc__ = doc
self.is_autogenerated = func.__name__ == 'flx_setter' # also see _js.py
def __repr__(self):
cname = self.__class__.__name__
return '<%s %r at 0x%x>' % (cname, self._name, id(self))
def _use_once(self, func):
""" To support super().
"""
self._func_once = func
def __call__(self, *args):
""" Invoke the action.
"""
ob = self._ob1()
if loop.can_mutate(ob):
func = self._func_once
self._func_once = self._func
if ob is not None:
res = func(ob, *args)
if res is not None:
logger.warning('Action (%s) should not return a value' %
self._name)
else:
loop.add_action_invokation(self, args)
return ob # 'Actions are invoked asynchronously'
|
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import (splitext, join, abspath, isdir, dirname, exists,
basename, expanduser, normcase, realpath)
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
from six import PY3
from six.moves import map, range
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.deprecation import deprecated
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=True)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict.fromkeys(sys.builtin_module_names, True)
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError as ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=True):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=True):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
Python name of a module or package split on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
try:
module = load_module(curname, mp_file, mp_filename, mp_desc)
finally:
if mp_file is not None:
mp_file.close()
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
prevmodule = module
if not _file and _is_namespace(curname):
continue
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
return module
def load_module_from_file(filepath, path=None, use_sys=True, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
modpath = []
for part in mod_path:
modpath.append(part)
path = join(path, part)
if not _is_namespace('.'.join(modpath)) and not _has_init(path):
return False
return True
def _canonicalize_path(path):
return realpath(expanduser(path))
def _path_from_filename(filename):
if PY3:
return filename
else:
if filename.endswith(".pyc"):
return filename[:-1]
return filename
@deprecated('you should avoid using modpath_from_file()')
def modpath_from_file(filename, extrapath=None):
"""DEPRECATED: doens't play well with symlinks and sys.meta_path
Given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle a package split across multiple
directories using __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding split module name
"""
filename = _path_from_filename(filename)
filename = _canonicalize_path(filename)
base = os.path.splitext(filename)[0]
if extrapath is not None:
for path_ in map(_canonicalize_path, extrapath):
path = abspath(path_)
if path and normcase(base[:len(path)]) == normcase(path):
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in map(_canonicalize_path, sys.path):
if path and normcase(base).startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
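# Illustrative round trip (the install location is hypothetical): for a file
# at <site-packages>/logilab/common/modutils.py, modpath_from_file() returns
# ['logilab', 'common', 'modutils'], and file_from_modpath() below maps that
# list back to the same source file.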
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
split module name (i.e. name of a module or package split
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered later
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
cleaned = []
for modname, module in list(sys.modules.items()):
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
cleaned.append(modname)
del sys.modules[modname]
break
return cleaned
def clean_sys_modules(names):
"""remove submodules starting with name from `names` from `sys.modules`"""
cleaned = set()
for modname in list(sys.modules):
for name in names:
if modname.startswith(name):
del sys.modules[modname]
cleaned.add(modname)
break
return cleaned
def is_python_source(filename):
"""
:rtype: bool
:return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered as standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
Note: this function is known to return wrong values when inside virtualenv.
See https://www.logilab.org/ticket/294756.
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError as ex:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return False
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
# we assume there are no namespaces in stdlib
return not _is_namespace(modname)
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return False
for path in std_path:
if filename.startswith(abspath(path)):
return True
return False
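# Illustrative expectations (exact results depend on the interpreter layout):
# is_standard_module('sys') and is_standard_module('os.path') are True, while
# a third-party package installed under site-packages (EXT_LIB_DIR), such as
# 'six', returns False.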
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
try:
import pkg_resources
except ImportError:
pkg_resources = None
def _is_namespace(modname):
return (pkg_resources is not None
and modname in pkg_resources._namespace_packages)
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
split module name (i.e. name of a module or package split
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if (_is_namespace(modpath[0]) and modpath[0] in sys.modules):
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
# use list() to protect against _NamespacePath instance we get with python 3, which
# find_module later doesn't like
path = list(module.__path__)
if not modpath:
return C_BUILTIN, None
imported = []
while modpath:
modname = modpath[0]
# take care of changes in the find_module implementation wrt builtin modules
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
_, mp_filename, mp_desc = find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs and mp_filename:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(join(mp_filename, '__init__.py')) as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if 'pkgutil' in data and 'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [join(p, *imported) for p in sys.path
if isdir(join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from weblate.trans.tasks import perform_commit
from weblate.trans.util import redirect_param
from weblate.utils import messages
from weblate.utils.views import get_component, get_project
@require_POST
@login_required
def lock_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("component.lock", obj):
raise PermissionDenied()
obj.do_lock(request.user)
perform_commit.delay(obj.pk, "lock", None)
messages.success(request, _("Component is now locked for translation updates!"))
return redirect_param(obj, "#repository")
@require_POST
@login_required
def unlock_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("component.lock", obj):
raise PermissionDenied()
obj.do_lock(request.user, False)
messages.success(request, _("Component is now open for translation updates."))
return redirect_param(obj, "#repository")
@require_POST
@login_required
def lock_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("component.lock", obj):
raise PermissionDenied()
for component in obj.component_set.iterator():
component.do_lock(request.user)
perform_commit.delay(component.pk, "lock", None)
messages.success(
request, _("All components are now locked for translation updates!")
)
return redirect_param(obj, "#repository")
@require_POST
@login_required
def unlock_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("component.lock", obj):
raise PermissionDenied()
for component in obj.component_set.iterator():
component.do_lock(request.user, False)
messages.success(request, _("Project is now open for translation updates."))
return redirect_param(obj, "#repository")
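# Sketch of how views like these are typically wired in a Django URLconf; the
# patterns, converter types and module path below are illustrative and are not
# taken from Weblate's actual urls.py:
#
#     from django.urls import path
#     from weblate.trans.views import lock
#
#     urlpatterns = [
#         path("<str:project>/<str:component>/lock/", lock.lock_component, name="lock_component"),
#         path("<str:project>/<str:component>/unlock/", lock.unlock_component, name="unlock_component"),
#     ]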
|
from datetime import datetime, timedelta
from os import path
import statistics
import unittest
import pytest
from homeassistant import config as hass_config
from homeassistant.components import recorder
from homeassistant.components.statistics.sensor import DOMAIN, StatisticsSensor
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
SERVICE_RELOAD,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.common import (
fire_time_changed,
get_test_home_assistant,
init_recorder_component,
)
from tests.components.recorder.common import wait_recording_done
@pytest.fixture(autouse=True)
def mock_legacy_time(legacy_patchable_time):
"""Make time patchable for all the tests."""
yield
class TestStatisticsSensor(unittest.TestCase):
"""Test the Statistics sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.values = [17, 20, 15.2, 5, 3.8, 9.2, 6.7, 14, 6]
self.count = len(self.values)
self.min = min(self.values)
self.max = max(self.values)
self.total = sum(self.values)
self.mean = round(sum(self.values) / len(self.values), 2)
self.median = round(statistics.median(self.values), 2)
self.deviation = round(statistics.stdev(self.values), 2)
self.variance = round(statistics.variance(self.values), 2)
self.change = round(self.values[-1] - self.values[0], 2)
self.average_change = round(self.change / (len(self.values) - 1), 2)
self.change_rate = round(self.change / (60 * (self.count - 1)), 2)
self.addCleanup(self.hass.stop)
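# For reference, the nine values above give total = 96.9, mean ≈ 10.77,
# median = 9.2 and change = 6 - 17 = -11; the assertions in the tests below
# compare state attributes against these derived figures.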
def test_binary_sensor_source(self):
"""Test if source is a sensor."""
values = ["on", "off", "on", "off", "on", "off", "on"]
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "binary_sensor.test_monitored",
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in values:
self.hass.states.set("binary_sensor.test_monitored", value)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert str(len(values)) == state.state
def test_sensor_source(self):
"""Test if source is a sensor."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert str(self.mean) == state.state
assert self.min == state.attributes.get("min_value")
assert self.max == state.attributes.get("max_value")
assert self.variance == state.attributes.get("variance")
assert self.median == state.attributes.get("median")
assert self.deviation == state.attributes.get("standard_deviation")
assert self.mean == state.attributes.get("mean")
assert self.count == state.attributes.get("count")
assert self.total == state.attributes.get("total")
assert TEMP_CELSIUS == state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
assert self.change == state.attributes.get("change")
assert self.average_change == state.attributes.get("average_change")
def test_sampling_size(self):
"""Test rotation."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 5,
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert 3.8 == state.attributes.get("min_value")
assert 14 == state.attributes.get("max_value")
def test_sampling_size_1(self):
"""Test validity of stats requiring only one sample."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 1,
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values[-3:]: # just the last 3 will do
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
# require only one data point
assert self.values[-1] == state.attributes.get("min_value")
assert self.values[-1] == state.attributes.get("max_value")
assert self.values[-1] == state.attributes.get("mean")
assert self.values[-1] == state.attributes.get("median")
assert self.values[-1] == state.attributes.get("total")
assert 0 == state.attributes.get("change")
assert 0 == state.attributes.get("average_change")
# require at least two data points
assert STATE_UNKNOWN == state.attributes.get("variance")
assert STATE_UNKNOWN == state.attributes.get("standard_deviation")
def test_max_age(self):
"""Test value deprecation."""
now = dt_util.utcnow()
mock_data = {
"return_time": datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
):
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"max_age": {"minutes": 3},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value one minute later
mock_data["return_time"] += timedelta(minutes=1)
state = self.hass.states.get("sensor.test")
assert 6 == state.attributes.get("min_value")
assert 14 == state.attributes.get("max_value")
def test_max_age_without_sensor_change(self):
"""Test value deprecation."""
now = dt_util.utcnow()
mock_data = {
"return_time": datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
):
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"max_age": {"minutes": 3},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value 30 seconds later
mock_data["return_time"] += timedelta(seconds=30)
state = self.hass.states.get("sensor.test")
assert 3.8 == state.attributes.get("min_value")
assert 15.2 == state.attributes.get("max_value")
# wait for 3 minutes (max_age).
mock_data["return_time"] += timedelta(minutes=3)
fire_time_changed(self.hass, mock_data["return_time"])
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert state.attributes.get("min_value") == STATE_UNKNOWN
assert state.attributes.get("max_value") == STATE_UNKNOWN
assert state.attributes.get("count") == 0
def test_change_rate(self):
"""Test min_age/max_age and change_rate."""
now = dt_util.utcnow()
mock_data = {
"return_time": datetime(now.year + 1, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
):
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value one minute later
mock_data["return_time"] += timedelta(minutes=1)
state = self.hass.states.get("sensor.test")
assert datetime(
now.year + 1, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC
) == state.attributes.get("min_age")
assert datetime(
now.year + 1, 8, 2, 12, 23 + self.count - 1, 42, tzinfo=dt_util.UTC
) == state.attributes.get("max_age")
assert self.change_rate == state.attributes.get("change_rate")
def test_initialize_from_database(self):
"""Test initializing the statistics from the database."""
# enable the recorder
init_recorder_component(self.hass)
self.hass.block_till_done()
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
# store some values
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
# wait for the recorder to really store the data
wait_recording_done(self.hass)
# only now create the statistics component, so that it must read the
# data from the database
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 100,
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
# check if the result is as in test_sensor_source()
state = self.hass.states.get("sensor.test")
assert str(self.mean) == state.state
def test_initialize_from_database_with_maxage(self):
"""Test initializing the statistics from the database."""
now = dt_util.utcnow()
mock_data = {
"return_time": datetime(now.year + 1, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
# Testing correct retrieval from recorder, thus we do not
# want purging to occur within the class itself.
def mock_purge(self):
return
# Set maximum age to 3 hours.
max_age = 3
# Determine what our minimum age should be based on test values.
expected_min_age = mock_data["return_time"] + timedelta(
hours=len(self.values) - max_age
)
# enable the recorder
init_recorder_component(self.hass)
self.hass.block_till_done()
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
), patch.object(StatisticsSensor, "_purge_old", mock_purge):
# store some values
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value 1 hour later
mock_data["return_time"] += timedelta(hours=1)
# wait for the recorder to really store the data
wait_recording_done(self.hass)
# only now create the statistics component, so that it must read
# the data from the database
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 100,
"max_age": {"hours": max_age},
}
},
)
self.hass.block_till_done()
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
# check if the result is as in test_sensor_source()
state = self.hass.states.get("sensor.test")
assert expected_min_age == state.attributes.get("min_age")
# The max_age timestamp should be 1 hour before what we have right
# now in mock_data['return_time'].
assert mock_data["return_time"] == state.attributes.get("max_age") + timedelta(
hours=1
)
async def test_reload(hass):
"""Verify we can reload filter sensors."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
hass.states.async_set("sensor.test_monitored", 12345)
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 100,
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"statistics/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test") is None
assert hass.states.get("sensor.cputest")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
import inspect
import logging
import socket
import sys
import time
from queue import Empty
from typing import Any
from typing import List
from typing import Type
import service_configuration_lib
from kazoo.client import KazooClient
from paasta_tools.deployd import watchers
from paasta_tools.deployd.common import DelayDeadlineQueue
from paasta_tools.deployd.common import DelayDeadlineQueueProtocol
from paasta_tools.deployd.common import get_marathon_clients_from_config
from paasta_tools.deployd.common import PaastaQueue
from paasta_tools.deployd.common import PaastaThread
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.leader import PaastaLeaderElection
from paasta_tools.deployd.metrics import QueueAndWorkerMetrics
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.deployd.workers import PaastaDeployWorker
from paasta_tools.list_marathon_service_instances import (
get_service_instances_that_need_bouncing,
)
from paasta_tools.marathon_tools import DEFAULT_SOA_DIR
from paasta_tools.metrics.metrics_lib import get_metrics_interface
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import ZookeeperPool
# Broken out into a constant so that we don't get drift between this and the code in paasta_deployd_steps.py that
# searches for this message.
DEAD_DEPLOYD_WORKER_MESSAGE = "Detected a dead worker, starting a replacement thread"
class AddHostnameFilter(logging.Filter):
def __init__(self) -> None:
super().__init__()
self.hostname = socket.gethostname()
def filter(self, record: Any) -> bool:
record.hostname = self.hostname
return True
class DeployDaemon(PaastaThread):
def __init__(self) -> None:
super().__init__()
self.started = False
self.daemon = True
service_configuration_lib.disable_yaml_cache()
self.config = load_system_paasta_config()
self.setup_logging()
self.metrics = get_metrics_interface("paasta.deployd")
self.setup_instances_to_bounce()
self.control = PaastaQueue("ControlQueue")
self.marathon_clients = get_marathon_clients_from_config()
def setup_instances_to_bounce(self) -> None:
if self.config.get_deployd_use_zk_queue():
zk_client = KazooClient(hosts=self.config.get_zk_hosts())
zk_client.start()
self.instances_to_bounce: DelayDeadlineQueueProtocol = ZKDelayDeadlineQueue(
client=zk_client
)
else:
self.instances_to_bounce = DelayDeadlineQueue()
def setup_logging(self) -> None:
root_logger = logging.getLogger()
root_logger.setLevel(getattr(logging, self.config.get_deployd_log_level()))
handler = logging.StreamHandler()
handler.addFilter(AddHostnameFilter())
root_logger.addHandler(handler)
logging.getLogger("kazoo").setLevel(logging.CRITICAL)
handler.setFormatter(
logging.Formatter(
"%(asctime)s:%(hostname)s:%(levelname)s:%(name)s:%(message)s"
)
)
def run(self) -> None:
self.log.info("paasta-deployd starting up...")
startup_counter = self.metrics.create_counter(
"process_started", paasta_cluster=self.config.get_cluster()
)
startup_counter.count()
with ZookeeperPool() as self.zk:
self.election = PaastaLeaderElection(
self.zk,
"/paasta-deployd-leader",
socket.getfqdn(),
control=self.control,
)
self.is_leader = False
self.log.info("Waiting to become leader")
self.election.run(self.startup)
self.log.info("Leadership given up, exiting...")
@property
def watcher_threads_enabled(self) -> List[Type[watchers.PaastaWatcher]]:
disabled_watchers = self.config.get_disabled_watchers()
watcher_classes = [
obj[1]
for obj in inspect.getmembers(watchers)
if inspect.isclass(obj[1]) and obj[1].__bases__[0] == watchers.PaastaWatcher
]
enabled_watchers = [
x for x in watcher_classes if x.__name__ not in disabled_watchers
]
return enabled_watchers
def startup(self) -> None:
self.is_leader = True
self.log.info("This node is elected as leader {}".format(socket.getfqdn()))
leader_counter = self.metrics.create_counter(
"leader_elections", paasta_cluster=self.config.get_cluster()
)
leader_counter.count()
self.log.info("Starting all watcher threads")
self.start_watchers()
self.log.info(
"All watchers started, now adding all services for initial bounce"
)
# Fill the queue if we are not using the persistent ZK queue
if not self.config.get_deployd_use_zk_queue():
self.add_all_services()
self.log.info("Prioritising services that we know need a bounce...")
if self.config.get_deployd_startup_oracle_enabled():
self.prioritise_bouncing_services()
self.log.info("Starting worker threads")
self.start_workers()
QueueAndWorkerMetrics(
queue=self.instances_to_bounce,
workers=self.workers,
cluster=self.config.get_cluster(),
metrics_provider=self.metrics,
).start()
self.started = True
self.log.info("Startup finished!")
self.main_loop()
def main_loop(self) -> None:
while True:
try:
message = self.control.get(block=False)
except Empty:
message = None
if message == "ABORT":
self.log.info("Got ABORT message, main_loop exiting...")
break
if not self.all_watchers_running():
self.log.error("One or more watcher died, committing suicide!")
sys.exit(1)
if self.all_workers_dead():
self.log.error("All workers have died, committing suicide!")
sys.exit(1)
self.check_and_start_workers()
time.sleep(0.1)
def all_watchers_running(self) -> bool:
return all([watcher.is_alive() for watcher in self.watcher_threads])
def all_workers_dead(self) -> bool:
return all([not worker.is_alive() for worker in self.workers])
def check_and_start_workers(self) -> None:
live_workers = len([worker for worker in self.workers if worker.is_alive()])
number_of_dead_workers = self.config.get_deployd_number_workers() - live_workers
for i in range(number_of_dead_workers):
self.log.error(DEAD_DEPLOYD_WORKER_MESSAGE)
worker_no = len(self.workers) + 1
worker = PaastaDeployWorker(
worker_no, self.instances_to_bounce, self.config, self.metrics
)
worker.start()
self.workers.append(worker)
def stop(self) -> None:
self.control.put("ABORT")
def start_workers(self) -> None:
self.workers: List[PaastaDeployWorker] = []
for i in range(self.config.get_deployd_number_workers()):
worker = PaastaDeployWorker(
i, self.instances_to_bounce, self.config, self.metrics
)
worker.start()
self.workers.append(worker)
def add_all_services(self) -> None:
instances = get_services_for_cluster(
cluster=self.config.get_cluster(),
instance_type="marathon",
soa_dir=DEFAULT_SOA_DIR,
)
for service, instance in instances:
self.instances_to_bounce.put(
ServiceInstance(
service=service,
instance=instance,
watcher="daemon_start",
bounce_by=time.time()
+ self.config.get_deployd_startup_bounce_deadline(),
wait_until=time.time(),
failures=0,
bounce_start_time=time.time(),
enqueue_time=time.time(),
)
)
def prioritise_bouncing_services(self) -> None:
service_instances = get_service_instances_that_need_bouncing(
self.marathon_clients, DEFAULT_SOA_DIR
)
now = time.time()
for service_instance in service_instances:
self.log.info(f"Prioritising {service_instance} to be bounced immediately")
service, instance = service_instance.split(".")
self.instances_to_bounce.put(
ServiceInstance(
service=service,
instance=instance,
watcher=type(self).__name__,
bounce_by=now,
wait_until=now,
failures=0,
bounce_start_time=time.time(),
enqueue_time=time.time(),
)
)
def start_watchers(self) -> None:
""" should block until all threads happy"""
self.watcher_threads = [
watcher(
instances_to_bounce=self.instances_to_bounce,
cluster=self.config.get_cluster(),
zookeeper_client=self.zk,
config=self.config,
)
for watcher in self.watcher_threads_enabled
]
self.log.info(f"Starting the following watchers {self.watcher_threads}")
for watcher in self.watcher_threads:
watcher.start()
self.log.info("Waiting for all watchers to start")
attempts = 0
while attempts < 120:
if all([watcher.is_ready for watcher in self.watcher_threads]):
return
self.log.info("Sleeping and waiting for watchers to all start")
self.log.info(
"Waiting on: {}".format(
[
watcher.__class__.__name__
for watcher in self.watcher_threads
if not watcher.is_ready
]
)
)
time.sleep(1)
attempts += 1
self.log.error("Failed to start all the watchers, exiting...")
sys.exit(1)
def main() -> None:
dd = DeployDaemon()
dd.start()
while dd.is_alive():
time.sleep(0.1)
if __name__ == "__main__":
main()
|
from datetime import timedelta
import logging
import github
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_NAME,
CONF_ACCESS_TOKEN,
CONF_NAME,
CONF_PATH,
CONF_URL,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_REPOS = "repositories"
ATTR_LATEST_COMMIT_MESSAGE = "latest_commit_message"
ATTR_LATEST_COMMIT_SHA = "latest_commit_sha"
ATTR_LATEST_RELEASE_TAG = "latest_release_tag"
ATTR_LATEST_RELEASE_URL = "latest_release_url"
ATTR_LATEST_OPEN_ISSUE_URL = "latest_open_issue_url"
ATTR_OPEN_ISSUES = "open_issues"
ATTR_LATEST_OPEN_PULL_REQUEST_URL = "latest_open_pull_request_url"
ATTR_OPEN_PULL_REQUESTS = "open_pull_requests"
ATTR_PATH = "path"
ATTR_STARGAZERS = "stargazers"
ATTR_FORKS = "forks"
ATTR_CLONES = "clones"
ATTR_CLONES_UNIQUE = "clones_unique"
ATTR_VIEWS = "views"
ATTR_VIEWS_UNIQUE = "views_unique"
DEFAULT_NAME = "GitHub"
SCAN_INTERVAL = timedelta(seconds=300)
REPO_SCHEMA = vol.Schema(
{vol.Required(CONF_PATH): cv.string, vol.Optional(CONF_NAME): cv.string}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_URL): cv.url,
vol.Required(CONF_REPOS): vol.All(cv.ensure_list, [REPO_SCHEMA]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GitHub sensor platform."""
sensors = []
for repository in config[CONF_REPOS]:
data = GitHubData(
repository=repository,
access_token=config.get(CONF_ACCESS_TOKEN),
server_url=config.get(CONF_URL),
)
if data.setup_error is True:
_LOGGER.error(
"Error setting up GitHub platform. %s",
"Check previous errors for details",
)
else:
sensors.append(GitHubSensor(data))
add_entities(sensors, True)
class GitHubSensor(Entity):
"""Representation of a GitHub sensor."""
def __init__(self, github_data):
"""Initialize the GitHub sensor."""
self._unique_id = github_data.repository_path
self._name = None
self._state = None
self._available = False
self._repository_path = None
self._latest_commit_message = None
self._latest_commit_sha = None
self._latest_release_tag = None
self._latest_release_url = None
self._open_issue_count = None
self._latest_open_issue_url = None
self._pull_request_count = None
self._latest_open_pr_url = None
self._stargazers = None
self._forks = None
self._clones = None
self._clones_unique = None
self._views = None
self._views_unique = None
self._github_data = github_data
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return unique ID for the sensor."""
return self._unique_id
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {
ATTR_PATH: self._repository_path,
ATTR_NAME: self._name,
ATTR_LATEST_COMMIT_MESSAGE: self._latest_commit_message,
ATTR_LATEST_COMMIT_SHA: self._latest_commit_sha,
ATTR_LATEST_RELEASE_URL: self._latest_release_url,
ATTR_LATEST_OPEN_ISSUE_URL: self._latest_open_issue_url,
ATTR_OPEN_ISSUES: self._open_issue_count,
ATTR_LATEST_OPEN_PULL_REQUEST_URL: self._latest_open_pr_url,
ATTR_OPEN_PULL_REQUESTS: self._pull_request_count,
ATTR_STARGAZERS: self._stargazers,
ATTR_FORKS: self._forks,
}
if self._latest_release_tag is not None:
attrs[ATTR_LATEST_RELEASE_TAG] = self._latest_release_tag
if self._clones is not None:
attrs[ATTR_CLONES] = self._clones
if self._clones_unique is not None:
attrs[ATTR_CLONES_UNIQUE] = self._clones_unique
if self._views is not None:
attrs[ATTR_VIEWS] = self._views
if self._views_unique is not None:
attrs[ATTR_VIEWS_UNIQUE] = self._views_unique
return attrs
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:github"
def update(self):
"""Collect updated data from GitHub API."""
self._github_data.update()
self._name = self._github_data.name
self._repository_path = self._github_data.repository_path
self._available = self._github_data.available
self._latest_commit_message = self._github_data.latest_commit_message
self._latest_commit_sha = self._github_data.latest_commit_sha
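        # For example (illustrative), a latest_release_url ending in
        # ".../releases/tag/v1.2.3" yields the tag "v1.2.3" below.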
if self._github_data.latest_release_url is not None:
self._latest_release_tag = self._github_data.latest_release_url.split(
"tag/"
)[1]
else:
self._latest_release_tag = None
self._latest_release_url = self._github_data.latest_release_url
self._state = self._github_data.latest_commit_sha[0:7]
self._open_issue_count = self._github_data.open_issue_count
self._latest_open_issue_url = self._github_data.latest_open_issue_url
self._pull_request_count = self._github_data.pull_request_count
self._latest_open_pr_url = self._github_data.latest_open_pr_url
self._stargazers = self._github_data.stargazers
self._forks = self._github_data.forks
self._clones = self._github_data.clones
self._clones_unique = self._github_data.clones_unique
self._views = self._github_data.views
self._views_unique = self._github_data.views_unique
class GitHubData:
"""GitHub Data object."""
def __init__(self, repository, access_token=None, server_url=None):
"""Set up GitHub."""
self._github = github
self.setup_error = False
try:
if server_url is not None:
server_url += "/api/v3"
self._github_obj = github.Github(access_token, base_url=server_url)
else:
self._github_obj = github.Github(access_token)
self.repository_path = repository[CONF_PATH]
repo = self._github_obj.get_repo(self.repository_path)
except self._github.GithubException as err:
_LOGGER.error("GitHub error for %s: %s", self.repository_path, err)
self.setup_error = True
return
self.name = repository.get(CONF_NAME, repo.name)
self.available = False
self.latest_commit_message = None
self.latest_commit_sha = None
self.latest_release_url = None
self.open_issue_count = None
self.latest_open_issue_url = None
self.pull_request_count = None
self.latest_open_pr_url = None
self.stargazers = None
self.forks = None
self.clones = None
self.clones_unique = None
self.views = None
self.views_unique = None
def update(self):
"""Update GitHub Sensor."""
try:
repo = self._github_obj.get_repo(self.repository_path)
self.stargazers = repo.stargazers_count
self.forks = repo.forks_count
open_issues = repo.get_issues(state="open", sort="created")
if open_issues is not None:
self.open_issue_count = open_issues.totalCount
if open_issues.totalCount > 0:
self.latest_open_issue_url = open_issues[0].html_url
open_pull_requests = repo.get_pulls(state="open", sort="created")
if open_pull_requests is not None:
self.pull_request_count = open_pull_requests.totalCount
if open_pull_requests.totalCount > 0:
self.latest_open_pr_url = open_pull_requests[0].html_url
latest_commit = repo.get_commits()[0]
self.latest_commit_sha = latest_commit.sha
self.latest_commit_message = latest_commit.commit.message
releases = repo.get_releases()
if releases and releases.totalCount > 0:
self.latest_release_url = releases[0].html_url
if repo.permissions.push:
clones = repo.get_clones_traffic()
if clones is not None:
self.clones = clones.get("count")
self.clones_unique = clones.get("uniques")
views = repo.get_views_traffic()
if views is not None:
self.views = views.get("count")
self.views_unique = views.get("uniques")
self.available = True
except self._github.GithubException as err:
_LOGGER.error("GitHub error for %s: %s", self.repository_path, err)
self.available = False
|
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
class RedshiftClusterSubnetGroup(resource.BaseResource):
"""Cluster Subnet Group associated with a Redshift cluster launched in a vpc.
A cluster subnet group allows you to specify a set of subnets in your VPC.
Attributes:
name: A string name of the cluster subnet group.
subnet_id: A string name of the subnet id associated with the group.
"""
def __init__(self, cmd_prefix):
super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)
self.cmd_prefix = cmd_prefix
self.name = 'pkb-' + FLAGS.run_uri
self.subnet_id = ''
def _Create(self):
cmd = self.cmd_prefix + [
'redshift', 'create-cluster-subnet-group',
'--cluster-subnet-group-name', self.name, '--description',
'Cluster Subnet Group for run uri {}'.format(
FLAGS.run_uri), '--subnet-ids', self.subnet_id
]
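    # Roughly equivalent CLI call (assuming cmd_prefix invokes the aws CLI; values illustrative):
    #   aws redshift create-cluster-subnet-group --cluster-subnet-group-name pkb-<run_uri> \
    #     --description "Cluster Subnet Group for run uri <run_uri>" --subnet-ids <subnet_id>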
vm_util.IssueCommand(cmd)
def _Delete(self):
"""Delete a redshift cluster subnet group."""
cmd = self.cmd_prefix + [
'redshift', 'delete-cluster-subnet-group',
'--cluster-subnet-group-name', self.name
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
|
import asyncio
import logging
from aiohttp import ClientError
from smart_meter_texas import Account, Client
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
client_session = aiohttp_client.async_get_clientsession(hass)
account = Account(data["username"], data["password"])
client = Client(client_session, account)
try:
await client.authenticate()
except (asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError) as error:
raise CannotConnect from error
except SmartMeterTexasAuthError as error:
raise InvalidAuth(error) from error
# Return info that you want to store in the config entry.
return {"title": account.username}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Smart Meter Texas."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
if not errors:
# Ensure the same account cannot be setup more than once.
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import datetime
import json
import logging
import time
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import relational_db
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import azure
from perfkitbenchmarker.providers.azure import azure_network
from perfkitbenchmarker.providers.azure import util
DEFAULT_DATABASE_NAME = 'database'
FLAGS = flags.FLAGS
DEFAULT_MYSQL_VERSION = '5.7'
DEFAULT_MYSQL_PORT = 3306
DEFAULT_POSTGRES_VERSION = '9.6'
DEFAULT_POSTGRES_PORT = 5432
DEFAULT_SQLSERVER_VERSION = 'DEFAULT'
DEFAULT_SQLSERVER_PORT = 1433
# Disk size configurations details at
# https://docs.microsoft.com/en-us/cli/azure/mysql/server?view=azure-cli-latest#az_mysql_server_create
AZURE_MIN_DB_DISK_SIZE_MB = 5120 # Minimum db disk size supported by Azure
AZURE_MAX_DB_DISK_SIZE_MB = 16777216 # Maximum db disk size supported by Azure
IS_READY_TIMEOUT = 60 * 60 * 1 # 1 hour (might take some time to prepare)
# Longest time recorded is 20 minutes when
# creating STANDARD_D64_V3 - 12/02/2020
# The Azure command timed out with the following error message:
#
# Deployment failed. Correlation ID: fcdc3c76-33cc-4eb1-986c-fbc30ce7d820.
# The operation timed out and automatically rolled back.
# Please retry the operation.
CREATE_AZURE_DB_TIMEOUT = 60 * 30
class AzureRelationalDb(relational_db.BaseRelationalDb):
"""An object representing an Azure RDS relational database.
  Currently Postgres, MySQL and SQL Server are supported. This class requires that a
  client VM be available as an attribute on the instance before Create() is
  called, which is the current behavior of PKB. This is necessary to set up the
  networking correctly. The following steps are performed to provision the
database:
1. create the RDS instance in the requested location.
Instructions from:
https://docs.microsoft.com/en-us/azure/postgresql/quickstart-create-server-database-azure-cli
On teardown, all resources are deleted.
Note that the client VM's location and the location requested for the database
must be the same.
"""
CLOUD = azure.CLOUD
def __init__(self, relational_db_spec):
super(AzureRelationalDb, self).__init__(relational_db_spec)
self.instance_id = 'pkb-db-instance-' + FLAGS.run_uri
if util.IsZone(self.spec.db_spec.zone):
raise errors.Config.InvalidValue(
'Availability zones are currently not supported by Azure DBs')
self.location = util.GetLocationFromZone(self.spec.db_spec.zone)
self.resource_group = azure_network.GetResourceGroup(self.location)
self.unmanaged_db_exists = None if self.is_managed_db else False
def GetResourceMetadata(self):
"""Returns the metadata associated with the resource.
All keys will be prefaced with relational_db before
being published (done in publisher.py).
Returns:
metadata: dict of Azure DB metadata.
"""
metadata = super(AzureRelationalDb, self).GetResourceMetadata()
metadata.update({
'zone': self.spec.db_spec.zone,
})
if hasattr(self.spec.db_disk_spec, 'iops'):
metadata.update({
'disk_iops': self.spec.db_disk_spec.iops,
})
return metadata
@staticmethod
def GetDefaultEngineVersion(engine):
"""Returns the default version of a given database engine.
Args:
engine (string): type of database (my_sql or postgres).
Returns:
(string): Default engine version.
Raises:
RelationalDbEngineNotFoundException: if an unknown engine is
requested.
"""
if engine == relational_db.POSTGRES:
return DEFAULT_POSTGRES_VERSION
elif engine == relational_db.MYSQL:
return DEFAULT_MYSQL_VERSION
elif engine == relational_db.SQLSERVER:
      return DEFAULT_SQLSERVER_VERSION
else:
raise relational_db.RelationalDbEngineNotFoundException(
'Unsupported engine {0}'.format(engine))
def GetDefaultPort(self):
"""Returns the default port of a given database engine.
Returns:
(string): Default port
Raises:
RelationalDbEngineNotFoundException: if an unknown engine is
requested.
"""
engine = self.spec.engine
if engine == relational_db.POSTGRES:
return DEFAULT_POSTGRES_PORT
elif engine == relational_db.MYSQL:
return DEFAULT_MYSQL_PORT
elif engine == relational_db.SQLSERVER:
return DEFAULT_SQLSERVER_PORT
raise relational_db.RelationalDbEngineNotFoundException(
'Unsupported engine {0}'.format(engine))
def GetAzCommandForEngine(self):
engine = self.spec.engine
if engine == relational_db.POSTGRES:
return 'postgres'
elif engine == relational_db.MYSQL:
return 'mysql'
elif engine == relational_db.SQLSERVER:
return 'sql'
raise relational_db.RelationalDbEngineNotFoundException(
'Unsupported engine {0}'.format(engine))
def SetDbConfiguration(self, name, value):
"""Set configuration for the database instance.
Args:
name: string, the name of the settings to change
value: value, string the value to set
"""
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'configuration',
'set',
'--name',
name,
'--value',
value,
'--resource-group',
self.resource_group.name,
'--server',
self.instance_id
]
vm_util.IssueCommand(cmd)
def RenameDatabase(self, new_name):
"""Renames an the database instace."""
engine = self.spec.engine
if engine == relational_db.SQLSERVER:
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'db',
'rename',
'--resource-group',
self.resource_group.name,
'--server',
self.instance_id,
'--name',
self.database_name,
'--new-name',
new_name
]
vm_util.IssueCommand(cmd)
self.database_name = new_name
else:
raise relational_db.RelationalDbEngineNotFoundException(
'Unsupported engine {0}'.format(engine))
def _ApplyManagedMysqlFlags(self):
"""Applies the MySqlFlags to a managed instance."""
for flag in FLAGS.mysql_flags:
name_and_value = flag.split('=')
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(), 'server', 'configuration', 'set',
'--name', name_and_value[0], '--resource-group',
self.resource_group.name, '--server', self.instance_id, '--value',
name_and_value[1]
]
_, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
if stderr:
raise Exception('Invalid MySQL flags: {0}. Error {1}'.format(
name_and_value, stderr))
def _CreateMySqlOrPostgresInstance(self):
"""Creates a managed MySql or Postgres instance."""
if not self.spec.high_availability:
raise Exception('Azure databases can only be used in high '
                      'availability. Please rerun with flag '
'--managed_db_high_availability=True')
# Valid storage sizes range from minimum of 5120 MB
# and additional increments of 1024 MB up to maximum of 16777216 MB.
azure_disk_size_mb = self.spec.db_disk_spec.disk_size * 1024
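    # The disk spec size appears to be given in GB (it is multiplied by 1024),
    # so e.g. an illustrative 100 GB spec becomes 102400 MB here.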
if azure_disk_size_mb > AZURE_MAX_DB_DISK_SIZE_MB:
      error_msg = ('Azure disk size was specified in the disk spec as %s GB, '
                   'which converts to %s MB, greater than the '
                   'maximum of 16777216 MB' % (
                       self.spec.db_disk_spec.disk_size, azure_disk_size_mb))
raise errors.Config.InvalidValue(error_msg)
elif azure_disk_size_mb < AZURE_MIN_DB_DISK_SIZE_MB:
      error_msg = ('Azure disk size was specified '
                   'in the disk spec as %s GB, which converts to %s MB, '
                   'smaller than the minimum of 5120 MB' % (
                       self.spec.db_disk_spec.disk_size, azure_disk_size_mb))
raise errors.Config.InvalidValue(error_msg)
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'create',
'--resource-group',
self.resource_group.name,
'--name',
self.instance_id,
'--location',
self.location,
'--admin-user',
self.spec.database_username,
'--admin-password',
self.spec.database_password,
'--storage-size',
str(azure_disk_size_mb),
'--sku-name',
self.spec.db_spec.machine_type,
'--version',
self.spec.engine_version,
]
vm_util.IssueCommand(cmd, timeout=CREATE_AZURE_DB_TIMEOUT)
def _CreateSqlServerInstance(self):
"""Creates a managed sql server instance."""
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'create',
'--resource-group',
self.resource_group.name,
'--name',
self.instance_id,
'--location',
self.location,
'--admin-user',
self.spec.database_username,
'--admin-password',
self.spec.database_password
]
vm_util.IssueCommand(cmd)
# Supported families & capacities for 'Standard' are:
# [(None, 10), (None, 20), (None, 50), (None, 100), (None, 200),
# (None, 400), (None, 800), (None, 1600), (None, 3000)]
# Supported families & capacities for 'Premium' are:
# [(None, 125), (None, 250), (None, 500), (None, 1000), (None, 1750),
# (None, 4000)].
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'db',
'create',
'--resource-group',
self.resource_group.name,
'--server',
self.instance_id,
'--name',
DEFAULT_DATABASE_NAME,
'--edition',
self.spec.db_spec.tier,
'--capacity',
str(self.spec.db_spec.compute_units),
'--zone-redundant',
'true' if self.spec.high_availability else 'false'
]
vm_util.IssueCommand(cmd)
self.database_name = DEFAULT_DATABASE_NAME
def _CreateAzureManagedSqlInstance(self):
"""Creates an Azure Sql Instance from a managed service."""
if self.spec.engine == relational_db.POSTGRES:
self._CreateMySqlOrPostgresInstance()
elif self.spec.engine == relational_db.MYSQL:
self._CreateMySqlOrPostgresInstance()
self._ApplyManagedMysqlFlags()
elif self.spec.engine == relational_db.SQLSERVER:
self._CreateSqlServerInstance()
else:
      raise NotImplementedError('Unknown how to create Azure database '
                                'engine {0}'.format(self.spec.engine))
def _CreateAzureUnmanagedSqlInstance(self):
"""Creates an Azure Sql Instance hosted inside of a VM."""
self.endpoint = self.server_vm.ip_address
if self.spec.engine == relational_db.MYSQL:
self._InstallMySQLServer()
self._ApplyMySqlFlags()
else:
raise Exception(
'Engine {0} not supported for unmanaged databases.'.format(
self.spec.engine))
self.firewall = azure_network.AzureFirewall()
self.firewall.AllowPort(
self.server_vm,
'3306',
source_range=['%s/32' % self.client_vm.ip_address])
def _Create(self):
"""Creates the Azure RDS instance.
Raises:
NotImplementedError: if unknown how to create self.spec.engine.
Exception: if attempting to create a non high availability database.
"""
if self.spec.engine == relational_db.MYSQL:
self._InstallMySQLClient()
if self.is_managed_db:
self._CreateAzureManagedSqlInstance()
else:
self.unmanaged_db_exists = True
self._CreateAzureUnmanagedSqlInstance()
def _Delete(self):
"""Deletes the underlying resource.
Implementations of this method should be idempotent since it may
be called multiple times, even if the resource has already been
deleted.
"""
if not self.is_managed_db:
if hasattr(self, 'firewall'):
self.firewall.DisallowAllPorts()
self.unmanaged_db_exists = False
self.server_vm.RemoteCommand('sudo cat /var/log/mysql/error.log')
return
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'delete',
'--resource-group', self.resource_group.name,
'--name', self.instance_id,
'--yes'
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Exists(self):
"""Returns true if the underlying resource exists.
Supplying this method is optional. If it is not implemented then the
default is to assume success when _Create and _Delete do not raise
exceptions.
"""
if not self.is_managed_db:
return self.unmanaged_db_exists
json_server_show = self._AzServerShow()
if json_server_show is None:
return False
return True
def _IsReady(self, timeout=IS_READY_TIMEOUT):
"""Return true if the underlying resource is ready.
This method will query the instance every 5 seconds until
    its instance state is 'Ready', or until a timeout occurs.
Args:
timeout: timeout in seconds
Returns:
True if the resource was ready in time, False if the wait timed out
or an Exception occurred.
"""
return self._IsInstanceReady(timeout)
def _PostCreate(self):
"""Perform general post create operations on the cluster.
"""
if not self.is_managed_db:
return
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'firewall-rule',
'create',
'--resource-group', self.resource_group.name,
'--server', self.instance_id,
'--name', 'AllowAllIps',
'--start-ip-address', '0.0.0.0',
'--end-ip-address', '255.255.255.255'
]
vm_util.IssueCommand(cmd)
self._AssignPortsForWriterInstance()
if self.spec.engine == 'mysql' or self.spec.engine == 'postgres':
# Azure will add @domainname after the database username
self.spec.database_username = (self.spec.database_username + '@' +
self.endpoint.split('.')[0])
def _IsInstanceReady(self, timeout=IS_READY_TIMEOUT):
"""Return true if the instance is ready.
This method will query the instance every 5 seconds until
its instance state is 'Ready', or until a timeout occurs.
Args:
timeout: timeout in seconds
Returns:
True if the resource was ready in time, False if the wait timed out
or an Exception occurred.
"""
if not self.is_managed_db:
return self._IsReadyUnmanaged()
start_time = datetime.datetime.now()
while True:
if (datetime.datetime.now() - start_time).seconds >= timeout:
logging.warning('Timeout waiting for sql instance to be ready')
return False
server_show_json = self._AzServerShow()
if server_show_json is not None:
engine = self.spec.engine
if engine == relational_db.POSTGRES:
state = server_show_json['userVisibleState']
elif engine == relational_db.MYSQL:
state = server_show_json['userVisibleState']
elif engine == relational_db.SQLSERVER:
state = server_show_json['state']
else:
raise relational_db.RelationalDbEngineNotFoundException(
'The db engine does not contain a valid state')
if state == 'Ready':
break
time.sleep(5)
return True
def _AzServerShow(self):
"""Runs the azure command az server show.
Returns:
      JSON object holding the output of the show command on success.
      None for a non-zero retcode. A non-zero retcode can occur if queried
before the database has finished being created.
"""
cmd = [
azure.AZURE_PATH,
self.GetAzCommandForEngine(),
'server',
'show',
'--resource-group', self.resource_group.name,
'--name', self.instance_id
]
stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return None
json_output = json.loads(stdout)
return json_output
def _AssignPortsForWriterInstance(self):
"""Assigns the ports and endpoints from the instance_id to self.
    These will be used to communicate with the database.
"""
server_show_json = self._AzServerShow()
self.endpoint = server_show_json['fullyQualifiedDomainName']
self.port = self.GetDefaultPort()
def MakePsqlConnectionString(self, database_name):
"""Makes the connection string used to connect via PSql.
    Overrides the implementation in the base class. Azure Postgres needs this format.
Args:
database_name: string, the name of the database to connect to.
Returns:
The connection string to use.
"""
return '\'host={0} user={1}@{2} password={3} dbname={4}\''.format(
self.endpoint,
self.spec.database_username,
self.instance_id,
self.spec.database_password,
database_name)
def _FailoverHA(self):
raise NotImplementedError()
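# A minimal sketch (illustrative placeholders, not part of the class above) of the
# string MakePsqlConnectionString('mydb') builds from the instance attributes:
#   'host=<fullyQualifiedDomainName> user=<database_username>@<instance_id> password=<database_password> dbname=mydb'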
|
from __future__ import absolute_import
from xml.sax.handler import ContentHandler
from lxml import etree
from lxml.etree import ElementTree, SubElement
from lxml.etree import Comment, ProcessingInstruction
class SaxError(etree.LxmlError):
"""General SAX error.
"""
def _getNsTag(tag):
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return None, tag
class ElementTreeContentHandler(ContentHandler):
"""Build an lxml ElementTree from SAX events.
"""
def __init__(self, makeelement=None):
ContentHandler.__init__(self)
self._root = None
self._root_siblings = []
self._element_stack = []
self._default_ns = None
self._ns_mapping = { None : [None] }
self._new_mappings = {}
if makeelement is None:
makeelement = etree.Element
self._makeelement = makeelement
def _get_etree(self):
"Contains the generated ElementTree after parsing is finished."
return ElementTree(self._root)
etree = property(_get_etree, doc=_get_etree.__doc__)
def setDocumentLocator(self, locator):
pass
def startDocument(self):
pass
def endDocument(self):
pass
def startPrefixMapping(self, prefix, uri):
self._new_mappings[prefix] = uri
try:
self._ns_mapping[prefix].append(uri)
except KeyError:
self._ns_mapping[prefix] = [uri]
if prefix is None:
self._default_ns = uri
def endPrefixMapping(self, prefix):
ns_uri_list = self._ns_mapping[prefix]
ns_uri_list.pop()
if prefix is None:
self._default_ns = ns_uri_list[-1]
def _buildTag(self, ns_name_tuple):
ns_uri, local_name = ns_name_tuple
if ns_uri:
el_tag = "{%s}%s" % ns_name_tuple
elif self._default_ns:
el_tag = "{%s}%s" % (self._default_ns, local_name)
else:
el_tag = local_name
return el_tag
def startElementNS(self, ns_name, qname, attributes=None):
el_name = self._buildTag(ns_name)
if attributes:
attrs = {}
try:
iter_attributes = attributes.iteritems()
except AttributeError:
iter_attributes = attributes.items()
for name_tuple, value in iter_attributes:
if name_tuple[0]:
attr_name = "{%s}%s" % name_tuple
else:
attr_name = name_tuple[1]
attrs[attr_name] = value
else:
attrs = None
element_stack = self._element_stack
if self._root is None:
element = self._root = \
self._makeelement(el_name, attrs, self._new_mappings)
if self._root_siblings and hasattr(element, 'addprevious'):
for sibling in self._root_siblings:
element.addprevious(sibling)
del self._root_siblings[:]
else:
element = SubElement(element_stack[-1], el_name,
attrs, self._new_mappings)
element_stack.append(element)
self._new_mappings.clear()
def processingInstruction(self, target, data):
pi = ProcessingInstruction(target, data)
if self._root is None:
self._root_siblings.append(pi)
else:
self._element_stack[-1].append(pi)
def endElementNS(self, ns_name, qname):
element = self._element_stack.pop()
el_tag = self._buildTag(ns_name)
if el_tag != element.tag:
raise SaxError("Unexpected element closed: " + el_tag)
def startElement(self, name, attributes=None):
if attributes:
attributes = dict(
[((None, k), v) for k, v in attributes.items()]
)
self.startElementNS((None, name), name, attributes)
def endElement(self, name):
self.endElementNS((None, name), name)
def characters(self, data):
last_element = self._element_stack[-1]
try:
# if there already is a child element, we must append to its tail
last_element = last_element[-1]
last_element.tail = (last_element.tail or '') + data
except IndexError:
# otherwise: append to the text
last_element.text = (last_element.text or '') + data
ignorableWhitespace = characters
class ElementTreeProducer(object):
"""Produces SAX events for an element and children.
"""
def __init__(self, element_or_tree, content_handler):
try:
element = element_or_tree.getroot()
except AttributeError:
element = element_or_tree
self._element = element
self._content_handler = content_handler
from xml.sax.xmlreader import AttributesNSImpl as attr_class
self._attr_class = attr_class
self._empty_attributes = attr_class({}, {})
def saxify(self):
self._content_handler.startDocument()
element = self._element
if hasattr(element, 'getprevious'):
siblings = []
sibling = element.getprevious()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
siblings.append(sibling)
sibling = sibling.getprevious()
for sibling in siblings[::-1]:
self._recursive_saxify(sibling, {})
self._recursive_saxify(element, {})
if hasattr(element, 'getnext'):
sibling = element.getnext()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
self._recursive_saxify(sibling, {})
sibling = sibling.getnext()
self._content_handler.endDocument()
def _recursive_saxify(self, element, parent_nsmap):
content_handler = self._content_handler
tag = element.tag
if tag is Comment or tag is ProcessingInstruction:
if tag is ProcessingInstruction:
content_handler.processingInstruction(
element.target, element.text)
tail = element.tail
if tail:
content_handler.characters(tail)
return
element_nsmap = element.nsmap
new_prefixes = []
if element_nsmap != parent_nsmap:
# There have been updates to the namespace
for prefix, ns_uri in element_nsmap.items():
if parent_nsmap.get(prefix) != ns_uri:
new_prefixes.append( (prefix, ns_uri) )
attribs = element.items()
if attribs:
attr_values = {}
attr_qnames = {}
for attr_ns_name, value in attribs:
attr_ns_tuple = _getNsTag(attr_ns_name)
attr_values[attr_ns_tuple] = value
attr_qnames[attr_ns_tuple] = self._build_qname(
attr_ns_tuple[0], attr_ns_tuple[1], element_nsmap,
preferred_prefix=None, is_attribute=True)
sax_attributes = self._attr_class(attr_values, attr_qnames)
else:
sax_attributes = self._empty_attributes
ns_uri, local_name = _getNsTag(tag)
qname = self._build_qname(
ns_uri, local_name, element_nsmap, element.prefix, is_attribute=False)
for prefix, uri in new_prefixes:
content_handler.startPrefixMapping(prefix, uri)
content_handler.startElementNS(
(ns_uri, local_name), qname, sax_attributes)
text = element.text
if text:
content_handler.characters(text)
for child in element:
self._recursive_saxify(child, element_nsmap)
content_handler.endElementNS((ns_uri, local_name), qname)
for prefix, uri in new_prefixes:
content_handler.endPrefixMapping(prefix)
tail = element.tail
if tail:
content_handler.characters(tail)
def _build_qname(self, ns_uri, local_name, nsmap, preferred_prefix, is_attribute):
if ns_uri is None:
return local_name
if not is_attribute and nsmap.get(preferred_prefix) == ns_uri:
prefix = preferred_prefix
else:
# Pick the first matching prefix, in alphabetical order.
candidates = [
pfx for (pfx, uri) in nsmap.items()
if pfx is not None and uri == ns_uri
]
prefix = (
candidates[0] if len(candidates) == 1
else min(candidates) if candidates
else None
)
if prefix is None:
# Default namespace
return local_name
return prefix + ':' + local_name
def saxify(element_or_tree, content_handler):
"""One-shot helper to generate SAX events from an XML tree and fire
them against a SAX ContentHandler.
"""
return ElementTreeProducer(element_or_tree, content_handler).saxify()
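# A minimal usage sketch (not part of the original module): serialize an lxml tree to
# SAX events with saxify() and rebuild it with ElementTreeContentHandler. The sample
# document below is purely illustrative.
if __name__ == '__main__':
    source = etree.fromstring(
        '<root xmlns="http://example.com/ns"><child>text</child></root>')
    handler = ElementTreeContentHandler()
    saxify(source, handler)
    rebuilt = handler.etree  # an ElementTree equivalent to the input
    print(etree.tostring(rebuilt.getroot()))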
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import unittest
import requests
import fs.memoryfs
from instalooter.looters import InstaLooter, ProfileLooter
USERNAME = os.getenv("IG_USERNAME")
PASSWORD = os.getenv("IG_PASSWORD")
try:
CONNECTION_FAILURE = not requests.get("https://instagr.am/instagram").ok
except requests.exceptions.ConnectionError:
CONNECTION_FAILURE = True
@unittest.skipIf(os.getenv("CI") == "true", "not supported in CI")
@unittest.skipUnless(USERNAME and PASSWORD, "credentials required")
class TestLogin(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = requests.Session()
@classmethod
def tearDownClass(cls):
cls.session.close()
def setUp(self):
self.looter = ProfileLooter(USERNAME, template="test")
self.destfs = fs.memoryfs.MemoryFS()
def tearDown(self):
self.destfs.close()
def test_login(self):
self.assertFalse(self.looter.logged_in())
self.assertRaises(RuntimeError, self.looter.medias)
self.assertFalse(self.looter._cachefs().exists(self.looter._COOKIE_FILE))
try:
self.looter.login(USERNAME, PASSWORD)
self.assertTrue(self.looter.logged_in())
self.assertTrue(self.looter._cachefs().exists(self.looter._COOKIE_FILE))
self.assertTrue(next(self.looter.medias()))
finally:
self.looter.logout()
self.assertFalse(self.looter._cachefs().exists(self.looter._COOKIE_FILE))
def test_download(self):
try:
self.looter.login(USERNAME, PASSWORD)
self.looter.download(self.destfs)
self.assertTrue(self.destfs.exists('test.jpg'))
self.assertEqual(self.destfs.getbytes('test.jpg')[6:10], b'JFIF')
finally:
self.looter.logout()
|
from datetime import timedelta
from homeassistant import data_entry_flow
from homeassistant.components.geonetnz_quakes import (
CONF_MINIMUM_MAGNITUDE,
CONF_MMI,
DOMAIN,
)
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
)
from tests.async_mock import patch
async def test_duplicate_error(hass, config_entry):
"""Test that errors are shown when duplicates are added."""
conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25}
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_step_import(hass):
"""Test that the import step works."""
conf = {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_MMI: 2,
CONF_SCAN_INTERVAL: timedelta(minutes=4),
CONF_MINIMUM_MAGNITUDE: 2.5,
}
with patch(
"homeassistant.components.geonetnz_quakes.async_setup_entry", return_value=True
), patch("homeassistant.components.geonetnz_quakes.async_setup", return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "-41.2, 174.7"
assert result["data"] == {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_MMI: 2,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 240.0,
CONF_MINIMUM_MAGNITUDE: 2.5,
}
async def test_step_user(hass):
"""Test that the user step works."""
hass.config.latitude = -41.2
hass.config.longitude = 174.7
conf = {CONF_RADIUS: 25, CONF_MMI: 4}
with patch(
"homeassistant.components.geonetnz_quakes.async_setup_entry", return_value=True
), patch("homeassistant.components.geonetnz_quakes.async_setup", return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "-41.2, 174.7"
assert result["data"] == {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_MMI: 4,
CONF_SCAN_INTERVAL: 300.0,
CONF_MINIMUM_MAGNITUDE: 0.0,
}
|
from gitsweep.tests.testcases import GitSweepTestCase, InspectorTestCase
class TestInspector(GitSweepTestCase, InspectorTestCase):
"""
Inspector can find merged branches and present them for cleaning.
"""
def test_no_branches(self):
"""
If only the master branch is present, nothing to clean.
"""
self.assertEqual([], self.inspector.merged_refs())
def test_filtered_refs(self):
"""
Will filter references and not return HEAD and master.
"""
for i in range(1, 4):
self.command('git checkout -b branch{0}'.format(i))
self.command('git checkout master')
refs = self.inspector._filtered_remotes(
self.inspector.repo.remotes[0])
self.assertEqual(['branch1', 'branch2', 'branch3'],
[i.remote_head for i in refs])
def test_one_branch_no_commits(self):
"""
There is one branch on the remote that is the same as master.
"""
self.command('git checkout -b branch1')
self.command('git checkout master')
# Since this is the same as master, it should show up as merged
self.assertEqual(['branch1'], self.merged_refs())
def test_one_branch_one_commit(self):
"""
A commit has been made in the branch so it's not safe to remove.
"""
self.command('git checkout -b branch1')
self.make_commit()
self.command('git checkout master')
# Since there is a commit in branch1, it's not safe to remove it
self.assertEqual([], self.merged_refs())
def test_one_merged_branch(self):
"""
If a branch has been merged, it's safe to delete it.
"""
self.command('git checkout -b branch1')
self.make_commit()
self.command('git checkout master')
self.command('git merge branch1')
self.assertEqual(['branch1'], self.merged_refs())
def test_commit_in_master(self):
"""
Commits in master not in the branch do not block it for deletion.
"""
self.command('git checkout -b branch1')
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch1')
self.assertEqual(['branch1'], self.merged_refs())
def test_large_set_of_changes(self):
r"""
A long list of changes is properly marked for deletion.
The branch history for this will look like this:
::
|\
| * 08d07e1 Adding 4e510716
* | 056abb2 Adding a0dfc9fb
|/
* 9d77626 Merge branch 'branch4'
|\
| * 956b3f9 Adding e16ec279
* | d11315e Adding 9571d55d
|/
* f100932 Merge branch 'branch3'
|\
| * c641899 Adding 9b33164f
* | 17c1e35 Adding b56c43be
|/
* c83c8d3 Merge branch 'branch2'
|\
| * bead4e5 Adding 31a13fa4
* | 5a88ec3 Adding b6a45f21
|/
* f34643d Merge branch 'branch1'
|\
| * 8e110c4 Adding 11948eb5
* | 4c94394 Adding db29f4aa
|/
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
self.assertEqual(
['branch1', 'branch2', 'branch3', 'branch4', 'branch5'],
self.merged_refs())
|
from io import BytesIO
from urllib.parse import urlparse, urlencode, urlunparse
import copy
import json
import zlib
from .util import CaseInsensitiveDict
def replace_headers(request, replacements):
"""Replace headers in request according to replacements.
The replacements should be a list of (key, value) pairs where the value can be any of:
1. A simple replacement string value.
2. None to remove the given header.
3. A callable which accepts (key, value, request) and returns a string value or None.
"""
new_headers = request.headers.copy()
for k, rv in replacements:
if k in new_headers:
ov = new_headers.pop(k)
if callable(rv):
rv = rv(key=k, value=ov, request=request)
if rv is not None:
new_headers[k] = rv
request.headers = new_headers
return request
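# A minimal sketch (not part of the original module) showing the three kinds of
# replacement values accepted by replace_headers(); the header names below are
# purely illustrative.
def _example_replace_headers(request):
    return replace_headers(request, [
        ("authorization", "**REDACTED**"),                      # 1. plain string value
        ("cookie", None),                                       # 2. remove the header
        ("x-api-key", lambda key, value, request: value[:4]),   # 3. callable returning a value or None
    ])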
def remove_headers(request, headers_to_remove):
"""
Wrap replace_headers() for API backward compatibility.
"""
replacements = [(k, None) for k in headers_to_remove]
return replace_headers(request, replacements)
def replace_query_parameters(request, replacements):
"""Replace query parameters in request according to replacements.
The replacements should be a list of (key, value) pairs where the value can be any of:
1. A simple replacement string value.
      2. None to remove the given query parameter.
3. A callable which accepts (key, value, request) and returns a string
value or None.
"""
query = request.query
new_query = []
replacements = dict(replacements)
for k, ov in query:
if k not in replacements:
new_query.append((k, ov))
else:
rv = replacements[k]
if callable(rv):
rv = rv(key=k, value=ov, request=request)
if rv is not None:
new_query.append((k, rv))
uri_parts = list(urlparse(request.uri))
uri_parts[4] = urlencode(new_query)
request.uri = urlunparse(uri_parts)
return request
def remove_query_parameters(request, query_parameters_to_remove):
"""
Wrap replace_query_parameters() for API backward compatibility.
"""
replacements = [(k, None) for k in query_parameters_to_remove]
return replace_query_parameters(request, replacements)
def replace_post_data_parameters(request, replacements):
"""Replace post data in request--either form data or json--according to replacements.
The replacements should be a list of (key, value) pairs where the value can be any of:
1. A simple replacement string value.
      2. None to remove the given post data parameter.
3. A callable which accepts (key, value, request) and returns a string
value or None.
"""
if not request.body:
# Nothing to replace
return request
replacements = dict(replacements)
if request.method == "POST" and not isinstance(request.body, BytesIO):
if request.headers.get("Content-Type") == "application/json":
json_data = json.loads(request.body.decode("utf-8"))
for k, rv in replacements.items():
if k in json_data:
ov = json_data.pop(k)
if callable(rv):
rv = rv(key=k, value=ov, request=request)
if rv is not None:
json_data[k] = rv
request.body = json.dumps(json_data).encode("utf-8")
else:
if isinstance(request.body, str):
request.body = request.body.encode("utf-8")
splits = [p.partition(b"=") for p in request.body.split(b"&")]
new_splits = []
for k, sep, ov in splits:
            if not sep:
new_splits.append((k, sep, ov))
else:
rk = k.decode("utf-8")
if rk not in replacements:
new_splits.append((k, sep, ov))
else:
rv = replacements[rk]
if callable(rv):
rv = rv(key=rk, value=ov.decode("utf-8"), request=request)
if rv is not None:
new_splits.append((k, sep, rv.encode("utf-8")))
request.body = b"&".join(k if sep is None else b"".join([k, sep, v]) for k, sep, v in new_splits)
return request
def remove_post_data_parameters(request, post_data_parameters_to_remove):
"""
Wrap replace_post_data_parameters() for API backward compatibility.
"""
replacements = [(k, None) for k in post_data_parameters_to_remove]
return replace_post_data_parameters(request, replacements)
def decode_response(response):
"""
If the response is compressed with gzip or deflate:
1. decompress the response body
2. delete the content-encoding header
3. update content-length header to decompressed length
"""
def is_compressed(headers):
encoding = headers.get("content-encoding", [])
return encoding and encoding[0] in ("gzip", "deflate")
def decompress_body(body, encoding):
"""Returns decompressed body according to encoding using zlib.
        To (de-)compress the gzip format, use wbits = zlib.MAX_WBITS | 16.
"""
if encoding == "gzip":
return zlib.decompress(body, zlib.MAX_WBITS | 16)
else: # encoding == 'deflate'
return zlib.decompress(body)
# Deepcopy here in case `headers` contain objects that could
# be mutated by a shallow copy and corrupt the real response.
response = copy.deepcopy(response)
headers = CaseInsensitiveDict(response["headers"])
if is_compressed(headers):
encoding = headers["content-encoding"][0]
headers["content-encoding"].remove(encoding)
if not headers["content-encoding"]:
del headers["content-encoding"]
new_body = decompress_body(response["body"]["string"], encoding)
response["body"]["string"] = new_body
headers["content-length"] = [str(len(new_body))]
response["headers"] = dict(headers)
return response
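# A minimal sketch (not part of the original module) of decode_response() on a
# gzip-compressed recorded response; the dict layout mirrors what the function
# above expects (a "headers" mapping plus body["string"]).
if __name__ == "__main__":
    gzipper = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)  # gzip container
    compressed = gzipper.compress(b'{"ok": true}') + gzipper.flush()
    recorded = {
        "headers": {"content-encoding": ["gzip"], "content-length": [str(len(compressed))]},
        "body": {"string": compressed},
    }
    decoded = decode_response(recorded)
    print(decoded["body"]["string"])  # b'{"ok": true}'
    print(decoded["headers"])         # content-encoding dropped, content-length updated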
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from .const import ATTR_DISCOVER_DEVICES, ATTR_DISCOVERY_TYPE, DISCOVER_BATTERY
from .entity import HMDevice
SENSOR_TYPES_CLASS = {
"IPShutterContact": DEVICE_CLASS_OPENING,
"IPShutterContactSabotage": DEVICE_CLASS_OPENING,
"MaxShutterContact": DEVICE_CLASS_OPENING,
"Motion": DEVICE_CLASS_MOTION,
"MotionV2": DEVICE_CLASS_MOTION,
"PresenceIP": DEVICE_CLASS_PRESENCE,
"Remote": None,
"RemoteMotion": None,
"ShutterContact": DEVICE_CLASS_OPENING,
"Smoke": DEVICE_CLASS_SMOKE,
"SmokeV2": DEVICE_CLASS_SMOKE,
"TiltSensor": None,
"WeatherSensor": None,
"IPContact": DEVICE_CLASS_OPENING,
"MotionIPV2": DEVICE_CLASS_MOTION,
"IPRemoteMotionV2": DEVICE_CLASS_MOTION,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the HomeMatic binary sensor platform."""
if discovery_info is None:
return
devices = []
for conf in discovery_info[ATTR_DISCOVER_DEVICES]:
if discovery_info[ATTR_DISCOVERY_TYPE] == DISCOVER_BATTERY:
devices.append(HMBatterySensor(conf))
else:
devices.append(HMBinarySensor(conf))
add_entities(devices, True)
class HMBinarySensor(HMDevice, BinarySensorEntity):
"""Representation of a binary HomeMatic device."""
@property
def is_on(self):
"""Return true if switch is on."""
if not self.available:
return False
return bool(self._hm_get_state())
@property
def device_class(self):
"""Return the class of this sensor from DEVICE_CLASSES."""
# If state is MOTION (Only RemoteMotion working)
if self._state == "MOTION":
return DEVICE_CLASS_MOTION
return SENSOR_TYPES_CLASS.get(self._hmdevice.__class__.__name__)
def _init_data_struct(self):
"""Generate the data dictionary (self._data) from metadata."""
# Add state to data struct
if self._state:
self._data.update({self._state: None})
class HMBatterySensor(HMDevice, BinarySensorEntity):
"""Representation of an HomeMatic low battery sensor."""
@property
def device_class(self):
"""Return battery as a device class."""
return DEVICE_CLASS_BATTERY
@property
def is_on(self):
"""Return True if battery is low."""
return bool(self._hm_get_state())
def _init_data_struct(self):
"""Generate the data dictionary (self._data) from metadata."""
# Add state to data struct
if self._state:
self._data.update({self._state: None})
|
import attr
import pytest
from qutebrowser.misc import split
# Most tests copied from Python's shlex.
# The original test data set was from shellwords, by Hartmut Goebel.
# Format: input/split|output|without|keep/split|output|with|keep/
test_data_str = r"""
one two/one|two/one| two/
one "two three" four/one|two three|four/one| "two three"| four/
one 'two three' four/one|two three|four/one| 'two three'| four/
one "two\" three" four/one|two" three|four/one| "two\" three"| four/
one 'two'\'' three' four/one|two' three|four/one| 'two'\'' three'| four/
one "two three/one|two three/one| "two three/
one 'two three/one|two three/one| 'two three/
one\/one\/one\/
one "two\/one|two\/one| "two\/
one /one/one| /
open -t i/open|-t|i/open| -t| i/
foo bar/foo|bar/foo| bar/
foo bar/foo|bar/ foo| bar/
foo bar /foo|bar/ foo| bar| /
foo bar bla fasel/foo|bar|bla|fasel/foo| bar| bla| fasel/
x y z xxxx/x|y|z|xxxx/x| y| z| xxxx/
\x bar/x|bar/\x| bar/
\ x bar/ x|bar/\ x| bar/
\ bar/ bar/\ bar/
foo \x bar/foo|x|bar/foo| \x| bar/
foo \ x bar/foo| x|bar/foo| \ x| bar/
foo \ bar/foo| bar/foo| \ bar/
foo "bar" bla/foo|bar|bla/foo| "bar"| bla/
"foo" "bar" "bla"/foo|bar|bla/"foo"| "bar"| "bla"/
"foo" bar "bla"/foo|bar|bla/"foo"| bar| "bla"/
"foo" bar bla/foo|bar|bla/"foo"| bar| bla/
foo 'bar' bla/foo|bar|bla/foo| 'bar'| bla/
'foo' 'bar' 'bla'/foo|bar|bla/'foo'| 'bar'| 'bla'/
'foo' bar 'bla'/foo|bar|bla/'foo'| bar| 'bla'/
'foo' bar bla/foo|bar|bla/'foo'| bar| bla/
blurb foo"bar"bar"fasel" baz/blurb|foobarbarfasel|baz/blurb| foo"bar"bar"fasel"| baz/
blurb foo'bar'bar'fasel' baz/blurb|foobarbarfasel|baz/blurb| foo'bar'bar'fasel'| baz/
""//""/
''//''/
foo "" bar/foo||bar/foo| ""| bar/
foo '' bar/foo||bar/foo| ''| bar/
foo "" "" "" bar/foo||||bar/foo| ""| ""| ""| bar/
foo '' '' '' bar/foo||||bar/foo| ''| ''| ''| bar/
\"/"/\"/
"\""/"/"\""/
"foo\ bar"/foo\ bar/"foo\ bar"/
"foo\\ bar"/foo\ bar/"foo\\ bar"/
"foo\\ bar\""/foo\ bar"/"foo\\ bar\""/
"foo\\" bar\"/foo\|bar"/"foo\\"| bar\"/
"foo\\ bar\" dfadf"/foo\ bar" dfadf/"foo\\ bar\" dfadf"/
"foo\\\ bar\" dfadf"/foo\\ bar" dfadf/"foo\\\ bar\" dfadf"/
"foo\\\x bar\" dfadf"/foo\\x bar" dfadf/"foo\\\x bar\" dfadf"/
"foo\x bar\" dfadf"/foo\x bar" dfadf/"foo\x bar\" dfadf"/
\'/'/\'/
'foo\ bar'/foo\ bar/'foo\ bar'/
'foo\\ bar'/foo\\ bar/'foo\\ bar'/
"foo\\\x bar\" df'a\ 'df"/foo\\x bar" df'a\ 'df/"foo\\\x bar\" df'a\ 'df"/
\"foo/"foo/\"foo/
\"foo\x/"foox/\"foo\x/
"foo\x"/foo\x/"foo\x"/
"foo\ "/foo\ /"foo\ "/
foo\ xx/foo xx/foo\ xx/
foo\ x\x/foo xx/foo\ x\x/
foo\ x\x\"/foo xx"/foo\ x\x\"/
"foo\ x\x"/foo\ x\x/"foo\ x\x"/
"foo\ x\x\\"/foo\ x\x\/"foo\ x\x\\"/
"foo\ x\x\\""foobar"/foo\ x\x\foobar/"foo\ x\x\\""foobar"/
"foo\ x\x\\"\'"foobar"/foo\ x\x\'foobar/"foo\ x\x\\"\'"foobar"/
"foo\ x\x\\"\'"fo'obar"/foo\ x\x\'fo'obar/"foo\ x\x\\"\'"fo'obar"/
"foo\ x\x\\"\'"fo'obar" 'don'\''t'/foo\ x\x\'fo'obar|don't/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'/
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\/foo\ x\x\'fo'obar|don't|\/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'| \\/
foo\ bar/foo bar/foo\ bar/
:-) ;-)/:-)|;-)/:-)| ;-)/
áéíóú/áéíóú/áéíóú/
"""
def _parse_split_test_data_str():
"""Parse the test data set into a TestCase object to use in tests.
Returns:
A list of TestCase objects with str attributes: inp, keep, no_keep
"""
@attr.s
class TestCase:
inp = attr.ib()
keep = attr.ib()
no_keep = attr.ib()
for line in test_data_str.splitlines():
if not line:
continue
data = line.split('/')
item = TestCase(inp=data[0], keep=data[1].split('|'),
no_keep=data[2].split('|'))
yield item
yield TestCase(inp='', keep=[], no_keep=[])
class TestSplit:
"""Test split."""
@pytest.fixture(params=list(_parse_split_test_data_str()),
ids=lambda e: e.inp)
def split_test_case(self, request):
"""Fixture to automatically parametrize all depending tests.
It will use the test data from test_data_str, parsed using
_parse_split_test_data_str().
"""
return request.param
def test_split(self, split_test_case):
"""Test splitting."""
items = split.split(split_test_case.inp)
assert items == split_test_case.keep
def test_split_keep_original(self, split_test_case):
"""Test if splitting with keep=True yields the original string."""
items = split.split(split_test_case.inp, keep=True)
assert ''.join(items) == split_test_case.inp
def test_split_keep(self, split_test_case):
"""Test splitting with keep=True."""
items = split.split(split_test_case.inp, keep=True)
assert items == split_test_case.no_keep
class TestSimpleSplit:
"""Test simple_split."""
TESTS = {
' foo bar': [' foo', ' bar'],
'foobar': ['foobar'],
' foo bar baz ': [' foo', ' bar', ' baz', ' '],
'f\ti\ts\th': ['f', '\ti', '\ts', '\th'],
'foo\nbar': ['foo', '\nbar'],
}
@pytest.mark.parametrize('test', sorted(TESTS), ids=repr)
def test_str_split(self, test):
"""Test if the behavior matches str.split."""
assert split.simple_split(test) == test.rstrip().split()
@pytest.mark.parametrize('s, maxsplit',
[("foo bar baz", 1), (" foo bar baz ", 0)],
ids=repr)
def test_str_split_maxsplit(self, s, maxsplit):
"""Test if the behavior matches str.split with given maxsplit."""
actual = split.simple_split(s, maxsplit=maxsplit)
expected = s.rstrip().split(maxsplit=maxsplit)
assert actual == expected
@pytest.mark.parametrize('test, expected', sorted(TESTS.items()), ids=repr)
def test_split_keep(self, test, expected):
"""Test splitting with keep=True."""
assert split.simple_split(test, keep=True) == expected
def test_maxsplit_0_keep(self):
"""Test special case with maxsplit=0 and keep=True."""
s = "foo bar"
assert split.simple_split(s, keep=True, maxsplit=0) == [s]
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_states(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce input boolean states."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in (STATE_ON, STATE_OFF):
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
if cur_state.state == state.state:
return
service = SERVICE_TURN_ON if state.state == STATE_ON else SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN,
service,
{ATTR_ENTITY_ID: state.entity_id},
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce component states."""
await asyncio.gather(
*(
_async_reproduce_states(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
import asyncio
import itertools as it
import logging
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
import homeassistant.core as ha
from homeassistant.exceptions import HomeAssistantError, Unauthorized, UnknownUser
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_extract_entity_ids
_LOGGER = logging.getLogger(__name__)
DOMAIN = ha.DOMAIN
SERVICE_RELOAD_CORE_CONFIG = "reload_core_config"
SERVICE_CHECK_CONFIG = "check_config"
SERVICE_UPDATE_ENTITY = "update_entity"
SERVICE_SET_LOCATION = "set_location"
SCHEMA_UPDATE_ENTITY = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
async def async_setup(hass: ha.HomeAssistant, config: dict) -> bool:
"""Set up general services related to Home Assistant."""
async def async_handle_turn_service(service):
"""Handle calls to homeassistant.turn_on/off."""
entity_ids = await async_extract_entity_ids(hass, service)
# Generic turn on/off method requires entity id
if not entity_ids:
_LOGGER.error(
"homeassistant/%s cannot be called without entity_id", service.service
)
return
# Group entity_ids by domain. groupby requires sorted data.
by_domain = it.groupby(
sorted(entity_ids), lambda item: ha.split_entity_id(item)[0]
)
tasks = []
for domain, ent_ids in by_domain:
            # Calling the homeassistant domain here would lead to an endless loop.
if domain == DOMAIN:
_LOGGER.warning(
"Called service homeassistant.%s with invalid entity IDs %s",
service.service,
", ".join(ent_ids),
)
continue
# We want to block for all calls and only return when all calls
# have been processed. If a service does not exist it causes a 10
# second delay while we're blocking waiting for a response.
# But services can be registered on other HA instances that are
            # listening to the bus too. So as an in-between solution, we'll
# block only if the service is defined in the current HA instance.
blocking = hass.services.has_service(domain, service.service)
# Create a new dict for this call
data = dict(service.data)
# ent_ids is a generator, convert it to a list.
data[ATTR_ENTITY_ID] = list(ent_ids)
tasks.append(
hass.services.async_call(
domain, service.service, data, blocking, context=service.context
)
)
if tasks:
await asyncio.gather(*tasks)
service_schema = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids}, extra=vol.ALLOW_EXTRA)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_OFF, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_ON, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TOGGLE, async_handle_turn_service, schema=service_schema
)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if call.service == SERVICE_HOMEASSISTANT_STOP:
hass.async_create_task(hass.async_stop())
return
try:
errors = await conf_util.async_check_ha_config_file(hass)
except HomeAssistantError:
return
if errors:
_LOGGER.error(errors)
hass.components.persistent_notification.async_create(
"Config error. See [the logs](/config/logs) for details.",
"Config validating",
f"{ha.DOMAIN}.check_config",
)
return
if call.service == SERVICE_HOMEASSISTANT_RESTART:
hass.async_create_task(hass.async_stop(RESTART_EXIT_CODE))
async def async_handle_update_service(call):
"""Service handler for updating an entity."""
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
)
for entity in call.data[ATTR_ENTITY_ID]:
if not user.permissions.check_entity(entity, POLICY_CONTROL):
raise Unauthorized(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
perm_category=CAT_ENTITIES,
)
tasks = [
hass.helpers.entity_component.async_update_entity(entity)
for entity in call.data[ATTR_ENTITY_ID]
]
if tasks:
await asyncio.wait(tasks)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_STOP, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_RESTART, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_CHECK_CONFIG, async_handle_core_service
)
hass.services.async_register(
ha.DOMAIN,
SERVICE_UPDATE_ENTITY,
async_handle_update_service,
schema=SCHEMA_UPDATE_ENTITY,
)
async def async_handle_reload_config(call):
"""Service handler for reloading core config."""
try:
conf = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(err)
return
# auth only processed during startup
await conf_util.async_process_ha_core_config(hass, conf.get(ha.DOMAIN) or {})
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG, async_handle_reload_config
)
async def async_set_location(call):
"""Service handler to set location."""
await hass.config.async_update(
latitude=call.data[ATTR_LATITUDE], longitude=call.data[ATTR_LONGITUDE]
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN,
SERVICE_SET_LOCATION,
async_set_location,
vol.Schema({ATTR_LATITUDE: cv.latitude, ATTR_LONGITUDE: cv.longitude}),
)
return True
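# Illustrative sketch (not part of the setup code above): why the entity ids are
# sorted before itertools.groupby in async_handle_turn_service -- groupby only
# merges adjacent items with equal keys. The helper name below is hypothetical.
def _example_group_entity_ids_by_domain(entity_ids):
    """Return a dict mapping each domain to its entity ids, e.g. 'light' -> [...]."""
    return {
        domain: list(ids)
        for domain, ids in it.groupby(
            sorted(entity_ids), lambda entity_id: entity_id.split(".")[0]
        )
    }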
|
import asyncio
import json
import logging
from pysqueezebox import Server, async_discover
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import SOURCE_DISCOVERY
from homeassistant.const import (
ATTR_COMMAND,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_START,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.util.dt import utcnow
from .browse_media import build_item_response, generate_playlist, library_payload
from .const import (
DEFAULT_PORT,
DISCOVERY_TASK,
DOMAIN,
KNOWN_PLAYERS,
PLAYER_DISCOVERY_UNSUB,
)
SERVICE_CALL_METHOD = "call_method"
SERVICE_CALL_QUERY = "call_query"
SERVICE_SYNC = "sync"
SERVICE_UNSYNC = "unsync"
ATTR_QUERY_RESULT = "query_result"
ATTR_SYNC_GROUP = "sync_group"
SIGNAL_PLAYER_REDISCOVERED = "squeezebox_player_rediscovered"
_LOGGER = logging.getLogger(__name__)
DISCOVERY_INTERVAL = 60
SUPPORT_SQUEEZEBOX = (
SUPPORT_BROWSE_MEDIA
| SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SEEK
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_STOP
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_PORT),
cv.deprecated(CONF_PASSWORD),
cv.deprecated(CONF_USERNAME),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
}
),
)
KNOWN_SERVERS = "known_servers"
ATTR_PARAMETERS = "parameters"
ATTR_OTHER_PLAYER = "other_player"
ATTR_TO_PROPERTY = [
ATTR_QUERY_RESULT,
ATTR_SYNC_GROUP,
]
SQUEEZEBOX_MODE = {
"pause": STATE_PAUSED,
"play": STATE_PLAYING,
"stop": STATE_IDLE,
}
async def start_server_discovery(hass):
"""Start a server discovery task."""
def _discovered_server(server):
asyncio.create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={
CONF_HOST: server.host,
CONF_PORT: int(server.port),
"uuid": server.uuid,
},
)
)
hass.data.setdefault(DOMAIN, {})
if DISCOVERY_TASK not in hass.data[DOMAIN]:
_LOGGER.debug("Adding server discovery task for squeezebox")
hass.data[DOMAIN][DISCOVERY_TASK] = hass.async_create_task(
async_discover(_discovered_server)
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up squeezebox platform from platform entry in configuration.yaml (deprecated)."""
if config:
await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up an LMS Server from a config entry."""
config = config_entry.data
_LOGGER.debug("Reached async_setup_entry for host=%s", config[CONF_HOST])
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config[CONF_HOST]
port = config[CONF_PORT]
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(config_entry.entry_id, {})
known_players = hass.data[DOMAIN].setdefault(KNOWN_PLAYERS, [])
_LOGGER.debug("Creating LMS object for %s", host)
lms = Server(async_get_clientsession(hass), host, port, username, password)
async def _discovery(now=None):
"""Discover squeezebox players by polling server."""
async def _discovered_player(player):
"""Handle a (re)discovered player."""
entity = next(
(
known
for known in known_players
if known.unique_id == player.player_id
),
None,
)
if entity:
await player.async_update()
async_dispatcher_send(
hass, SIGNAL_PLAYER_REDISCOVERED, player.player_id, player.connected
)
if not entity:
_LOGGER.debug("Adding new entity: %s", player)
entity = SqueezeBoxEntity(player)
known_players.append(entity)
async_add_entities([entity])
players = await lms.async_get_players()
if players:
for player in players:
hass.async_create_task(_discovered_player(player))
hass.data[DOMAIN][config_entry.entry_id][
PLAYER_DISCOVERY_UNSUB
] = hass.helpers.event.async_call_later(DISCOVERY_INTERVAL, _discovery)
_LOGGER.debug("Adding player discovery job for LMS server: %s", host)
asyncio.create_task(_discovery())
# Register entity services
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_CALL_METHOD,
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMETERS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
},
"async_call_method",
)
platform.async_register_entity_service(
SERVICE_CALL_QUERY,
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMETERS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
},
"async_call_query",
)
platform.async_register_entity_service(
SERVICE_SYNC,
{vol.Required(ATTR_OTHER_PLAYER): cv.string},
"async_sync",
)
platform.async_register_entity_service(SERVICE_UNSYNC, None, "async_unsync")
# Start server discovery task if not already running
if hass.is_running:
asyncio.create_task(start_server_discovery(hass))
else:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, start_server_discovery(hass)
)
return True
class SqueezeBoxEntity(MediaPlayerEntity):
"""
Representation of a SqueezeBox device.
Wraps a pysqueezebox.Player() object.
"""
def __init__(self, player):
"""Initialize the SqueezeBox device."""
self._player = player
self._last_update = None
self._query_result = {}
self._available = True
self._remove_dispatcher = None
@property
def device_state_attributes(self):
"""Return device-specific attributes."""
squeezebox_attr = {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
return squeezebox_attr
@property
def name(self):
"""Return the name of the device."""
return self._player.name
@property
def unique_id(self):
"""Return a unique ID."""
return self._player.player_id
@property
def available(self):
"""Return True if device connected to LMS server."""
return self._available
@callback
def rediscovered(self, unique_id, connected):
"""Make a player available again."""
if unique_id == self.unique_id and connected:
self._available = True
_LOGGER.info("Player %s is available again", self.name)
self._remove_dispatcher()
@property
def state(self):
"""Return the state of the device."""
if not self._player.power:
return STATE_OFF
if self._player.mode:
return SQUEEZEBOX_MODE.get(self._player.mode)
return None
async def async_update(self):
"""Update the Player() object."""
        # Only update available players; newly available players will be rediscovered and marked available.
if self._available:
last_media_position = self.media_position
await self._player.async_update()
if self.media_position != last_media_position:
self._last_update = utcnow()
if self._player.connected is False:
_LOGGER.info("Player %s is not available", self.name)
self._available = False
# start listening for restored players
self._remove_dispatcher = async_dispatcher_connect(
self.hass, SIGNAL_PLAYER_REDISCOVERED, self.rediscovered
)
async def async_will_remove_from_hass(self):
"""Remove from list of known players when removed from hass."""
self.hass.data[DOMAIN][KNOWN_PLAYERS].remove(self)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._player.volume:
return int(float(self._player.volume)) / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.muting
@property
def media_content_id(self):
"""Content ID of current playing media."""
if not self._player.playlist:
return None
if len(self._player.playlist) > 1:
urls = [{"url": track["url"]} for track in self._player.playlist]
return json.dumps({"index": self._player.current_index, "urls": urls})
return self._player.url
@property
def media_content_type(self):
"""Content type of current playing media."""
if not self._player.playlist:
return None
if len(self._player.playlist) > 1:
return MEDIA_TYPE_PLAYLIST
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._player.duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._player.time
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_update
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._player.image_url
@property
def media_title(self):
"""Title of current playing media."""
return self._player.title
@property
def media_artist(self):
"""Artist of current playing media."""
return self._player.artist
@property
def media_album_name(self):
"""Album of current playing media."""
return self._player.album
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self._player.shuffle
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SQUEEZEBOX
@property
def sync_group(self):
"""List players we are synced with."""
player_ids = {
p.unique_id: p.entity_id for p in self.hass.data[DOMAIN][KNOWN_PLAYERS]
}
sync_group = []
for player in self._player.sync_group:
if player in player_ids:
sync_group.append(player_ids[player])
return sync_group
@property
def query_result(self):
"""Return the result from the call_query service."""
return self._query_result
async def async_turn_off(self):
"""Turn off media player."""
await self._player.async_set_power(False)
async def async_volume_up(self):
"""Volume up media player."""
await self._player.async_set_volume("+5")
async def async_volume_down(self):
"""Volume down media player."""
await self._player.async_set_volume("-5")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
volume_percent = str(int(volume * 100))
await self._player.async_set_volume(volume_percent)
async def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
await self._player.async_set_muting(mute)
async def async_media_stop(self):
"""Send stop command to media player."""
await self._player.async_stop()
async def async_media_play_pause(self):
"""Send pause command to media player."""
await self._player.async_toggle_pause()
async def async_media_play(self):
"""Send play command to media player."""
await self._player.async_play()
async def async_media_pause(self):
"""Send pause command to media player."""
await self._player.async_pause()
async def async_media_next_track(self):
"""Send next track command."""
await self._player.async_index("+1")
async def async_media_previous_track(self):
"""Send next track command."""
await self._player.async_index("-1")
async def async_media_seek(self, position):
"""Send seek command."""
await self._player.async_time(position)
async def async_turn_on(self):
"""Turn the media player on."""
await self._player.async_set_power(True)
async def async_play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the current playlist.
"""
cmd = "play"
index = None
if kwargs.get(ATTR_MEDIA_ENQUEUE):
cmd = "add"
if media_type == MEDIA_TYPE_MUSIC:
await self._player.async_load_url(media_id, cmd)
return
if media_type == MEDIA_TYPE_PLAYLIST:
try:
# a saved playlist by number
payload = {
"search_id": int(media_id),
"search_type": MEDIA_TYPE_PLAYLIST,
}
playlist = await generate_playlist(self._player, payload)
except ValueError:
# a list of urls
content = json.loads(media_id)
playlist = content["urls"]
index = content["index"]
else:
payload = {
"search_id": media_id,
"search_type": media_type,
}
playlist = await generate_playlist(self._player, payload)
_LOGGER.debug("Generated playlist: %s", playlist)
await self._player.async_load_playlist(playlist, cmd)
if index is not None:
await self._player.async_index(index)
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
shuffle_mode = "song" if shuffle else "none"
await self._player.async_set_shuffle(shuffle_mode)
async def async_clear_playlist(self):
"""Send the media player the command for clear playlist."""
await self._player.async_clear_playlist()
async def async_call_method(self, command, parameters=None):
"""
Call Squeezebox JSON/RPC method.
Additional parameters are added to the command to form the list of
positional parameters (p0, p1..., pN) passed to JSON/RPC server.
"""
all_params = [command]
if parameters:
for parameter in parameters:
all_params.append(parameter)
await self._player.async_query(*all_params)
async def async_call_query(self, command, parameters=None):
"""
Call Squeezebox JSON/RPC method where we care about the result.
Additional parameters are added to the command to form the list of
positional parameters (p0, p1..., pN) passed to JSON/RPC server.
"""
all_params = [command]
if parameters:
for parameter in parameters:
all_params.append(parameter)
self._query_result = await self._player.async_query(*all_params)
_LOGGER.debug("call_query got result %s", self._query_result)
async def async_sync(self, other_player):
"""
Add another Squeezebox player to this player's sync group.
If the other player is a member of a sync group, it will leave the current sync group
without asking.
"""
player_ids = {
p.entity_id: p.unique_id for p in self.hass.data[DOMAIN][KNOWN_PLAYERS]
}
other_player_id = player_ids.get(other_player)
if other_player_id:
await self._player.async_sync(other_player_id)
else:
_LOGGER.info("Could not find player_id for %s. Not syncing", other_player)
async def async_unsync(self):
"""Unsync this Squeezebox player."""
await self._player.async_unsync()
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
_LOGGER.debug(
"Reached async_browse_media with content_type %s and content_id %s",
media_content_type,
media_content_id,
)
if media_content_type in [None, "library"]:
return await library_payload(self._player)
payload = {
"search_type": media_content_type,
"search_id": media_content_id,
}
return await build_item_response(self._player, payload)
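# Illustrative sketch (not part of the integration): the JSON payload shape that
# media_content_id produces and async_play_media parses for multi-track
# playlists. The URLs and index below are hypothetical examples.
EXAMPLE_PLAYLIST_CONTENT_ID = json.dumps(
    {
        "index": 1,
        "urls": [
            {"url": "file:///music/track_01.flac"},
            {"url": "file:///music/track_02.flac"},
        ],
    }
)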
|
from __future__ import absolute_import
import sys
import vim # noqa
try:
from importlib.machinery import PathFinder as _PathFinder
if not hasattr(vim, 'find_module'):
vim.find_module = _PathFinder.find_module
except ImportError:
pass
def auto():
"""Fix PEP8 erorrs in current buffer.
pymode: uses it in command PymodeLintAuto with pymode#lint#auto()
"""
from .autopep8 import fix_file
class Options(object):
aggressive = 1
diff = False
experimental = True
ignore = vim.eval('g:pymode_lint_ignore')
in_place = True
indent_size = int(vim.eval('&tabstop'))
line_range = None
hang_closing = False
max_line_length = int(vim.eval('g:pymode_options_max_line_length'))
pep8_passes = 100
recursive = False
select = vim.eval('g:pymode_lint_select')
verbose = 0
fix_file(vim.current.buffer.name, Options)
def get_documentation():
"""Search documentation and append to current buffer."""
from ._compat import StringIO
sys.stdout, _ = StringIO(), sys.stdout
help(vim.eval('a:word'))
sys.stdout, out = _, sys.stdout.getvalue()
vim.current.buffer.append(str(out).splitlines(), 0)
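# Illustrative sketch (not part of the plugin): the stdout-swap pattern used by
# get_documentation() above, shown as a standalone helper. The function name is
# hypothetical.
def _example_capture_help(obj):
    """Return the output of help(obj) as a string."""
    import io
    old_stdout, sys.stdout = sys.stdout, io.StringIO()
    try:
        help(obj)
        return sys.stdout.getvalue()
    finally:
        sys.stdout = old_stdout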
|
import argparse
import logging
import os
import pwd
from multiprocessing import Pool
from arctic.decorators import _get_host
from arctic.store.audit import ArcticTransaction
from .utils import setup_logging
from ..date import DateRange, to_pandas_closed_closed, CLOSED_OPEN, OPEN_CLOSED
from ..hosts import get_arctic_lib
logger = logging.getLogger(__name__)
# Use the UID rather than environment variables for auditing
USER = pwd.getpwuid(os.getuid())[0]
def copy_symbols_helper(src, dest, log, force, splice):
def _copy_symbol(symbols):
for symbol in symbols:
with ArcticTransaction(dest, symbol, USER, log) as mt:
existing_data = dest.has_symbol(symbol)
if existing_data:
if force:
logger.warn("Symbol: %s already exists in destination, OVERWRITING" % symbol)
elif splice:
logger.warn("Symbol: %s already exists in destination, splicing in new data" % symbol)
else:
logger.warn("Symbol: {} already exists in {}@{}, use --force to overwrite or --splice to join "
"with existing data".format(symbol, _get_host(dest).get('l'),
_get_host(dest).get('mhost')))
continue
version = src.read(symbol)
new_data = version.data
if existing_data and splice:
original_data = dest.read(symbol).data
preserve_start = to_pandas_closed_closed(DateRange(None, new_data.index[0].to_pydatetime(),
interval=CLOSED_OPEN)).end
preserve_end = to_pandas_closed_closed(DateRange(new_data.index[-1].to_pydatetime(),
None,
interval=OPEN_CLOSED)).start
if not original_data.index.tz:
# No timezone on the original, should we even allow this?
preserve_start = preserve_start.replace(tzinfo=None)
preserve_end = preserve_end.replace(tzinfo=None)
before = original_data.loc[:preserve_start]
after = original_data[preserve_end:]
new_data = before.append(new_data).append(after)
mt.write(symbol, new_data, metadata=version.metadata)
return _copy_symbol
def main():
usage = """
Copy data from one MongoDB instance to another.
Example:
arctic_copy_data --log "Copying data" --src user.library@host1 --dest user.library@host2 symbol1 symbol2
"""
setup_logging()
p = argparse.ArgumentParser(usage=usage)
p.add_argument("--src", required=True, help="Source MongoDB like: library@hostname:port")
p.add_argument("--dest", required=True, help="Destination MongoDB like: library@hostname:port")
p.add_argument("--log", required=True, help="Data CR")
p.add_argument("--force", default=False, action='store_true', help="Force overwrite of existing data for symbol.")
p.add_argument("--splice", default=False, action='store_true', help="Keep existing data before and after the new data.")
p.add_argument("--parallel", default=1, type=int, help="Number of imports to run in parallel.")
p.add_argument("symbols", nargs='+', type=str, help="List of symbol regexes to copy from source to dest.")
opts = p.parse_args()
src = get_arctic_lib(opts.src)
dest = get_arctic_lib(opts.dest)
logger.info("Copying data from %s -> %s" % (opts.src, opts.dest))
    # Expand the given symbol regexes against the source library's symbol list.
required_symbols = set()
for symbol in opts.symbols:
required_symbols.update(src.list_symbols(regex=symbol))
required_symbols = sorted(required_symbols)
logger.info("Copying: {} symbols".format(len(required_symbols)))
if len(required_symbols) < 1:
logger.warn("No symbols found that matched those provided.")
return
# Function we'll call to do the data copying
copy_symbol = copy_symbols_helper(src, dest, opts.log, opts.force, opts.splice)
if opts.parallel > 1:
logger.info("Starting: {} jobs".format(opts.parallel))
pool = Pool(processes=opts.parallel)
# Break the jobs into chunks for multiprocessing
        chunk_size = len(required_symbols) // opts.parallel
chunk_size = max(chunk_size, 1)
chunks = [required_symbols[offs:offs + chunk_size] for offs in
range(0, len(required_symbols), chunk_size)]
assert sum(len(x) for x in chunks) == len(required_symbols)
        pool.map(copy_symbol, chunks)
else:
copy_symbol(required_symbols)
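# Illustrative sketch (not part of the script): the chunking used above to feed
# the multiprocessing pool, with integer division so the chunk size is an int.
def _example_chunk_symbols(symbols, parallel):
    """Split symbols into roughly equal chunks, one per worker."""
    chunk_size = max(len(symbols) // parallel, 1)
    return [symbols[offs:offs + chunk_size]
            for offs in range(0, len(symbols), chunk_size)]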
if __name__ == '__main__':
main()
|
from datetime import timedelta
from typing import Any, Mapping
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DATA_BYTES, DATA_RATE_KIBIBYTES_PER_SECOND
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
BYTES_RECEIVED,
BYTES_SENT,
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_UDN,
DATA_PACKETS,
DATA_RATE_PACKETS_PER_SECOND,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
DOMAIN_COORDINATORS,
DOMAIN_DEVICES,
KIBIBYTE,
LOGGER as _LOGGER,
PACKETS_RECEIVED,
PACKETS_SENT,
TIMESTAMP,
)
from .device import Device
SENSOR_TYPES = {
BYTES_RECEIVED: {
"device_value_key": BYTES_RECEIVED,
"name": f"{DATA_BYTES} received",
"unit": DATA_BYTES,
"unique_id": BYTES_RECEIVED,
"derived_name": f"{DATA_RATE_KIBIBYTES_PER_SECOND} received",
"derived_unit": DATA_RATE_KIBIBYTES_PER_SECOND,
"derived_unique_id": "KiB/sec_received",
},
BYTES_SENT: {
"device_value_key": BYTES_SENT,
"name": f"{DATA_BYTES} sent",
"unit": DATA_BYTES,
"unique_id": BYTES_SENT,
"derived_name": f"{DATA_RATE_KIBIBYTES_PER_SECOND} sent",
"derived_unit": DATA_RATE_KIBIBYTES_PER_SECOND,
"derived_unique_id": "KiB/sec_sent",
},
PACKETS_RECEIVED: {
"device_value_key": PACKETS_RECEIVED,
"name": f"{DATA_PACKETS} received",
"unit": DATA_PACKETS,
"unique_id": PACKETS_RECEIVED,
"derived_name": f"{DATA_RATE_PACKETS_PER_SECOND} received",
"derived_unit": DATA_RATE_PACKETS_PER_SECOND,
"derived_unique_id": "packets/sec_received",
},
PACKETS_SENT: {
"device_value_key": PACKETS_SENT,
"name": f"{DATA_PACKETS} sent",
"unit": DATA_PACKETS,
"unique_id": PACKETS_SENT,
"derived_name": f"{DATA_RATE_PACKETS_PER_SECOND} sent",
"derived_unit": DATA_RATE_PACKETS_PER_SECOND,
"derived_unique_id": "packets/sec_sent",
},
}
async def async_setup_platform(
hass: HomeAssistantType, config, async_add_entities, discovery_info=None
) -> None:
"""Old way of setting up UPnP/IGD sensors."""
_LOGGER.debug(
"async_setup_platform: config: %s, discovery: %s", config, discovery_info
)
async def async_setup_entry(
hass, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the UPnP/IGD sensors."""
data = config_entry.data
if CONFIG_ENTRY_UDN in data:
udn = data[CONFIG_ENTRY_UDN]
else:
# any device will do
udn = list(hass.data[DOMAIN][DOMAIN_DEVICES])[0]
device: Device = hass.data[DOMAIN][DOMAIN_DEVICES][udn]
update_interval_sec = config_entry.options.get(
CONFIG_ENTRY_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
update_interval = timedelta(seconds=update_interval_sec)
_LOGGER.debug("update_interval: %s", update_interval)
_LOGGER.debug("Adding sensors")
coordinator = DataUpdateCoordinator[Mapping[str, Any]](
hass,
_LOGGER,
name=device.name,
update_method=device.async_get_traffic_data,
update_interval=update_interval,
)
await coordinator.async_refresh()
hass.data[DOMAIN][DOMAIN_COORDINATORS][udn] = coordinator
sensors = [
RawUpnpSensor(coordinator, device, SENSOR_TYPES[BYTES_RECEIVED]),
RawUpnpSensor(coordinator, device, SENSOR_TYPES[BYTES_SENT]),
RawUpnpSensor(coordinator, device, SENSOR_TYPES[PACKETS_RECEIVED]),
RawUpnpSensor(coordinator, device, SENSOR_TYPES[PACKETS_SENT]),
DerivedUpnpSensor(coordinator, device, SENSOR_TYPES[BYTES_RECEIVED]),
DerivedUpnpSensor(coordinator, device, SENSOR_TYPES[BYTES_SENT]),
DerivedUpnpSensor(coordinator, device, SENSOR_TYPES[PACKETS_RECEIVED]),
DerivedUpnpSensor(coordinator, device, SENSOR_TYPES[PACKETS_SENT]),
]
async_add_entities(sensors, True)
class UpnpSensor(CoordinatorEntity):
"""Base class for UPnP/IGD sensors."""
def __init__(
self,
coordinator: DataUpdateCoordinator[Mapping[str, Any]],
device: Device,
sensor_type: Mapping[str, str],
update_multiplier: int = 2,
) -> None:
"""Initialize the base sensor."""
super().__init__(coordinator)
self._device = device
self._sensor_type = sensor_type
self._update_counter_max = update_multiplier
self._update_counter = 0
@property
def icon(self) -> str:
"""Icon to use in the frontend, if any."""
return "mdi:server-network"
@property
def available(self) -> bool:
"""Return if entity is available."""
device_value_key = self._sensor_type["device_value_key"]
return (
self.coordinator.last_update_success
and device_value_key in self.coordinator.data
)
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"{self._device.name} {self._sensor_type['name']}"
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return f"{self._device.udn}_{self._sensor_type['unique_id']}"
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._sensor_type["unit"]
@property
def device_info(self) -> Mapping[str, Any]:
"""Get device info."""
return {
"connections": {(dr.CONNECTION_UPNP, self._device.udn)},
"name": self._device.name,
"manufacturer": self._device.manufacturer,
"model": self._device.model_name,
}
class RawUpnpSensor(UpnpSensor):
"""Representation of a UPnP/IGD sensor."""
@property
def state(self) -> str:
"""Return the state of the device."""
device_value_key = self._sensor_type["device_value_key"]
value = self.coordinator.data[device_value_key]
if value is None:
return None
return format(value, "d")
class DerivedUpnpSensor(UpnpSensor):
"""Representation of a UNIT Sent/Received per second sensor."""
def __init__(self, coordinator, device, sensor_type) -> None:
"""Initialize sensor."""
super().__init__(coordinator, device, sensor_type)
self._last_value = None
self._last_timestamp = None
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"{self._device.name} {self._sensor_type['derived_name']}"
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return f"{self._device.udn}_{self._sensor_type['derived_unique_id']}"
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._sensor_type["derived_unit"]
def _has_overflowed(self, current_value) -> bool:
"""Check if value has overflowed."""
return current_value < self._last_value
@property
def state(self) -> str:
"""Return the state of the device."""
# Can't calculate any derivative if we have only one value.
device_value_key = self._sensor_type["device_value_key"]
current_value = self.coordinator.data[device_value_key]
if current_value is None:
return None
current_timestamp = self.coordinator.data[TIMESTAMP]
if self._last_value is None or self._has_overflowed(current_value):
self._last_value = current_value
self._last_timestamp = current_timestamp
return None
# Calculate derivative.
delta_value = current_value - self._last_value
if self._sensor_type["unit"] == DATA_BYTES:
delta_value /= KIBIBYTE
delta_time = current_timestamp - self._last_timestamp
if delta_time.seconds == 0:
# Prevent division by 0.
return None
derived = delta_value / delta_time.seconds
# Store current values for future use.
self._last_value = current_value
self._last_timestamp = current_timestamp
return format(derived, ".1f")
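# Illustrative sketch (not part of the integration): the derivative computed in
# DerivedUpnpSensor.state above, reduced to a standalone function. The function
# name and arguments are hypothetical.
def _example_rate_kib_per_sec(last_bytes, current_bytes, elapsed_seconds):
    """Return the KiB/s rate between two byte-counter samples, or None."""
    if current_bytes < last_bytes or elapsed_seconds == 0:
        # Counter overflow/reset or no elapsed time: no meaningful rate.
        return None
    return (current_bytes - last_bytes) / KIBIBYTE / elapsed_seconds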
|
import logging
from pylutron_caseta.smartbridge import Smartbridge
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from . import DOMAIN # pylint: disable=unused-import
from .const import (
ABORT_REASON_ALREADY_CONFIGURED,
ABORT_REASON_CANNOT_CONNECT,
CONF_CA_CERTS,
CONF_CERTFILE,
CONF_KEYFILE,
ERROR_CANNOT_CONNECT,
STEP_IMPORT_FAILED,
)
_LOGGER = logging.getLogger(__name__)
ENTRY_DEFAULT_TITLE = "Caséta bridge"
class LutronCasetaFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Lutron Caseta config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize a Lutron Caseta flow."""
self.data = {}
async def async_step_import(self, import_info):
"""Import a new Caseta bridge as a config entry.
This flow is triggered by `async_setup`.
"""
# Abort if existing entry with matching host exists.
host = import_info[CONF_HOST]
if any(
host == entry.data[CONF_HOST] for entry in self._async_current_entries()
):
return self.async_abort(reason=ABORT_REASON_ALREADY_CONFIGURED)
# Store the imported config for other steps in this flow to access.
self.data[CONF_HOST] = host
self.data[CONF_KEYFILE] = import_info[CONF_KEYFILE]
self.data[CONF_CERTFILE] = import_info[CONF_CERTFILE]
self.data[CONF_CA_CERTS] = import_info[CONF_CA_CERTS]
if not await self.async_validate_connectable_bridge_config():
# Ultimately we won't have a dedicated step for import failure, but
# in order to keep configuration.yaml-based configs transparently
# working without requiring further actions from the user, we don't
# display a form at all before creating a config entry in the
# default case, so we're only going to show a form in case the
# import fails.
# This will change in an upcoming release where UI-based config flow
# will become the default for the Lutron Caseta integration (which
# will require users to go through a confirmation flow for imports).
return await self.async_step_import_failed()
return self.async_create_entry(title=ENTRY_DEFAULT_TITLE, data=self.data)
async def async_step_import_failed(self, user_input=None):
"""Make failed import surfaced to user."""
if user_input is None:
return self.async_show_form(
step_id=STEP_IMPORT_FAILED,
description_placeholders={"host": self.data[CONF_HOST]},
errors={"base": ERROR_CANNOT_CONNECT},
)
return self.async_abort(reason=ABORT_REASON_CANNOT_CONNECT)
async def async_validate_connectable_bridge_config(self):
"""Check if we can connect to the bridge with the current config."""
try:
bridge = Smartbridge.create_tls(
hostname=self.data[CONF_HOST],
keyfile=self.hass.config.path(self.data[CONF_KEYFILE]),
certfile=self.hass.config.path(self.data[CONF_CERTFILE]),
ca_certs=self.hass.config.path(self.data[CONF_CA_CERTS]),
)
await bridge.connect()
if not bridge.is_connected():
return False
await bridge.close()
return True
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unknown exception while checking connectivity to bridge %s",
self.data[CONF_HOST],
)
return False
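# Illustrative sketch (not part of the integration): the shape of the
# configuration.yaml data that async_step_import receives; the host and file
# names below are hypothetical examples.
#
#     {
#         CONF_HOST: "192.168.1.20",
#         CONF_KEYFILE: "lutron_caseta.key",
#         CONF_CERTFILE: "lutron_caseta.crt",
#         CONF_CA_CERTS: "lutron_caseta-bridge.crt",
#     }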
|
import pytest
from requests_toolbelt import sessions
import cherrypy._cpnative_server
pytestmark = pytest.mark.skipif(
'sys.platform == "win32"',
reason='tests fail on Windows',
)
@pytest.fixture
def cp_native_server(request):
"""A native server."""
class Root(object):
@cherrypy.expose
def index(self):
return 'Hello World!'
cls = cherrypy._cpnative_server.CPHTTPServer
cherrypy.server.httpserver = cls(cherrypy.server)
cherrypy.tree.mount(Root(), '/')
cherrypy.engine.start()
request.addfinalizer(cherrypy.engine.stop)
url = 'http://localhost:{cherrypy.server.socket_port}'.format(**globals())
return sessions.BaseUrlSession(url)
def test_basic_request(cp_native_server):
"""A request to a native server should succeed."""
resp = cp_native_server.get('/')
assert resp.ok
assert resp.status_code == 200
assert resp.text == 'Hello World!'
|
from arcam.fmj.client import ConnectionFailed
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.arcam_fmj.config_flow import get_entry_client
from homeassistant.components.arcam_fmj.const import DOMAIN, DOMAIN_DATA_ENTRIES
from homeassistant.config_entries import SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SOURCE
from .conftest import (
MOCK_CONFIG_ENTRY,
MOCK_HOST,
MOCK_NAME,
MOCK_PORT,
MOCK_UDN,
MOCK_UUID,
)
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
MOCK_UPNP_DEVICE = f"""
<root xmlns="urn:schemas-upnp-org:device-1-0">
<device>
<UDN>{MOCK_UDN}</UDN>
</device>
</root>
"""
MOCK_UPNP_LOCATION = f"http://{MOCK_HOST}:8080/dd.xml"
MOCK_DISCOVER = {
ssdp.ATTR_UPNP_MANUFACTURER: "ARCAM",
ssdp.ATTR_UPNP_MODEL_NAME: " ",
ssdp.ATTR_UPNP_MODEL_NUMBER: "AVR450, AVR750",
ssdp.ATTR_UPNP_FRIENDLY_NAME: f"Arcam media client {MOCK_UUID}",
ssdp.ATTR_UPNP_SERIAL: "12343",
ssdp.ATTR_SSDP_LOCATION: f"http://{MOCK_HOST}:8080/dd.xml",
ssdp.ATTR_UPNP_UDN: MOCK_UDN,
ssdp.ATTR_UPNP_DEVICE_TYPE: "urn:schemas-upnp-org:device:MediaRenderer:1",
}
@pytest.fixture(name="dummy_client", autouse=True)
def dummy_client_fixture(hass):
"""Mock out the real client."""
with patch("homeassistant.components.arcam_fmj.config_flow.Client") as client:
client.return_value.start.side_effect = AsyncMock(return_value=None)
client.return_value.stop.side_effect = AsyncMock(return_value=None)
yield client.return_value
async def test_ssdp(hass, dummy_client):
"""Test a ssdp import flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=MOCK_DISCOVER,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"Arcam FMJ ({MOCK_HOST})"
assert result["data"] == MOCK_CONFIG_ENTRY
async def test_ssdp_abort(hass):
"""Test a ssdp import flow."""
entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONFIG_ENTRY, title=MOCK_NAME, unique_id=MOCK_UUID
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=MOCK_DISCOVER,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_unable_to_connect(hass, dummy_client):
"""Test a ssdp import flow."""
dummy_client.start.side_effect = AsyncMock(side_effect=ConnectionFailed)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=MOCK_DISCOVER,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_ssdp_update(hass):
"""Test a ssdp import flow."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "old_host", CONF_PORT: MOCK_PORT},
title=MOCK_NAME,
unique_id=MOCK_UUID,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=MOCK_DISCOVER,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert entry.data[CONF_HOST] == MOCK_HOST
async def test_user(hass, aioclient_mock):
"""Test a manual user configuration flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=None,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = {
CONF_HOST: MOCK_HOST,
CONF_PORT: MOCK_PORT,
}
aioclient_mock.get(MOCK_UPNP_LOCATION, text=MOCK_UPNP_DEVICE)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"Arcam FMJ ({MOCK_HOST})"
assert result["data"] == MOCK_CONFIG_ENTRY
assert result["result"].unique_id == MOCK_UUID
async def test_invalid_ssdp(hass, aioclient_mock):
"""Test a a config flow where ssdp fails."""
user_input = {
CONF_HOST: MOCK_HOST,
CONF_PORT: MOCK_PORT,
}
aioclient_mock.get(MOCK_UPNP_LOCATION, text="")
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"Arcam FMJ ({MOCK_HOST})"
assert result["data"] == MOCK_CONFIG_ENTRY
assert result["result"].unique_id is None
async def test_user_wrong(hass, aioclient_mock):
"""Test a manual user configuration flow with no ssdp response."""
user_input = {
CONF_HOST: MOCK_HOST,
CONF_PORT: MOCK_PORT,
}
aioclient_mock.get(MOCK_UPNP_LOCATION, status=404)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"Arcam FMJ ({MOCK_HOST})"
assert result["result"].unique_id is None
async def test_get_entry_client(hass):
"""Test helper for configuration."""
entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONFIG_ENTRY, title=MOCK_NAME, unique_id=MOCK_UUID
)
hass.data[DOMAIN_DATA_ENTRIES] = {entry.entry_id: "dummy"}
assert get_entry_client(hass, entry) == "dummy"
|
from pscript import this_is_js, RawJS
from pscript.stubs import window, undefined, time, console, JSON
# This module gets transpiled to JavaScript as a whole
__pscript__ = True
class Flexx:
""" JavaScript Flexx module. This provides the connection between
the Python and JS (via a websocket).
"""
def __init__(self):
if window.flexx.init:
raise RuntimeError('Should not create global Flexx object more than once.')
# Init (overloadable) variables. These can be set by creating
# a window.flexx object *before* instantiating this class, or by
# setting them on this object before the init() is called.
self.is_notebook = False
self.is_exported = False
# Copy attributes from temporary object (e.g. is_notebook, require, ...)
for key in window.flexx.keys():
self[key] = window.flexx[key]
# We need a global main widget (shared between sessions)
self.need_main_widget = True # Used/set in ui/_widget.py
# Keep track of sessions
self._session_count = 0
self.sessions = {}
# Note: flexx.init() is not auto-called when Flexx is embedded
window.addEventListener('load', self.init, False)
window.addEventListener('unload', self.exit, False) # not beforeunload
def init(self):
""" Called after document is loaded. """
# Create div to put dynamic CSS assets in
self.asset_node = window.document.createElement("div")
self.asset_node.id = 'Flexx asset container'
window.document.body.appendChild(self.asset_node)
if self.is_exported:
if self.is_notebook:
print('Flexx: I am in an exported notebook!')
else:
print('Flexx: I am in an exported app!')
self.run_exported_app()
else:
print('Flexx: Initializing')
if not self.is_notebook:
self._remove_querystring()
self.init_logging()
def _remove_querystring(self):
# remove querystring ?session=x
try:
window.history.replaceState(window.history.state, '',
window.location.pathname)
except Exception:
pass # e.g. firefox-app/nw
def exit(self):
""" Called when runtime is about to quit. """
for session in self.sessions.values():
session.exit()
def spin(self, n=1):
RawJS("""
var el = window.document.getElementById('flexx-spinner');
if (el) {
if (n === null) { // Hide the spinner overlay, now or in a bit
if (el.children[0].innerHTML.indexOf('limited') > 0) {
setTimeout(function() { el.style.display = 'none'; }, 2000);
} else {
el.style.display = 'none';
}
} else {
for (var i=0; i<n; i++) { el.children[1].innerHTML += '■'; }
}
}
""")
def init_logging(self):
""" Setup logging so that messages are proxied to Python.
"""
if window.console.ori_log:
return # already initialized the loggers
# Keep originals
window.console.ori_log = window.console.log
window.console.ori_info = window.console.info or window.console.log
window.console.ori_warn = window.console.warn or window.console.log
window.console.ori_error = window.console.error or window.console.log
def log(msg):
window.console.ori_log(msg)
for session in self.sessions.values():
session.send_command("PRINT", str(msg))
def info(msg):
window.console.ori_info(msg)
for session in self.sessions.values():
session.send_command("INFO", str(msg))
def warn(msg):
window.console.ori_warn(msg)
for session in self.sessions.values():
session.send_command("WARN", str(msg))
def error(msg):
evt = dict(message=str(msg), error=msg, preventDefault=lambda: None)
on_error(evt)
def on_error(evt):
self._handle_error(evt)
on_error = on_error.bind(self)
# Set new versions
window.console.log = log
window.console.info = info
window.console.warn = warn
window.console.error = error
# Create error handler, so that JS errors get into Python
window.addEventListener('error', on_error, False)
def create_session(self, app_name, session_id, ws_url):
# The call to this method is embedded by get_page(),
# or injected by init_notebook().
# Can be called before init() is called.
if window.performance and window.performance.navigation.type == 2:
# Force reload when we got here with back-button, otherwise
# an old session-id is used, see issue #530
window.location.reload()
elif self._validate_browser_capabilities():
s = JsSession(app_name, session_id, ws_url)
self._session_count += 1
self['s' + self._session_count] = s
self.sessions[session_id] = s
def _validate_browser_capabilities(self):
# We test a handful of features here, and assume that if these work,
# all of Flexx works. It is not a hard guarantee, of course, because
# the user can use modern features in an application.
RawJS("""
var el = window.document.getElementById('flexx-spinner');
if ( window.WebSocket === undefined || // IE10+
Object.keys === undefined || // IE9+
false
) {
var msg = ('Flexx does not support this browser.<br>' +
'Try Firefox, Chrome, ' +
'or a more recent version of the current browser.');
if (el) { el.children[0].innerHTML = msg; }
else { window.alert(msg); }
return false;
} else if (''.startsWith === undefined) { // probably IE
var msg = ('Flexx support for this browser is limited.<br>' +
'Consider using Firefox, Chrome, or maybe Edge.');
if (el) { el.children[0].innerHTML = msg; }
return true;
} else {
return true;
}
""")
def _handle_error(self, evt):
msg = short_msg = evt.message
if not window.evt:
window.evt = evt
if evt.error and evt.error.stack: # evt.error can be None for syntax err
stack = evt.error.stack.splitlines()
# Some replacements
session_needle = '?session_id=' + self.id
for i in range(len(stack)):
stack[i] = stack[i].replace('@', ' @ ').replace(session_needle, '')
# Strip items from the start
for x in [evt.message, '_pyfunc_op_error']:
if x in stack[0]:
stack.pop(0)
# Truncate the stack
for i in range(len(stack)):
for x in ['_process_actions', '_process_reactions', '_process_calls']:
if ('Loop.' + x) in stack[i]:
stack = stack[:i]
break
# Pop items from in between
for i in reversed(range(len(stack))):
for x in ['flx_action ']:
if stack[i] and stack[i].count(x):
stack.pop(i)
# Combine and tweak the message some more
msg += '\n' + '\n'.join(stack)
elif evt.message and evt.lineno: # message, url, linenumber
msg += "\nIn %s:%i" % (evt.filename, evt.lineno)
# Handle error
evt.preventDefault() # Don't do the standard error
window.console.ori_error(msg)
for session in self.sessions.values():
session.send_command("ERROR", short_msg)
class JsSession:
def __init__(self, app_name, id, ws_url=None):
self.app = None # the root component (can be a PyComponent)
self.app_name = app_name
self.id = id
self.status = 1
self.ws_url = ws_url
self._component_counter = 0
self._disposed_ob = {'_disposed': True}
# Maybe this is JLab
if not self.id:
jconfig = window.document.getElementById('jupyter-config-data')
if jconfig:
try:
config = JSON.parse(jconfig.innerText)
self.id = config.flexx_session_id
self.app_name = config.flexx_app_name
except Exception as err:
print(err)
# Init internal variables
self._init_time = time()
        self._pending_commands = []  # to pend raw commands during init
        self._ping_calls = []  # pending (ping_count, callback, args), see call_after_roundtrip()
        self._ping_counter = 0
self._asset_count = 0
self._ws = None
self.last_msg = None
# self.classes = {}
self.instances = {}
self.instances_to_check_size = {}
if not window.flexx.is_exported:
self.init_socket()
# Initiate service to track resize
window.addEventListener('resize', self._check_size_of_objects, False)
window.setInterval(self._check_size_of_objects, 1000)
def exit(self):
if self._ws: # is not null or undefined
self._ws.close()
self._ws = None
self.status = 0
# flexx.instances.sessions.pop(self) might be good,
# but perhaps not that much need, and leaving is nice for debugging.
def send_command(self, *command):
if self._ws is not None:
try:
bb = serializer.encode(command)
except Exception as err:
print('Command that failed to encode:')
print(command)
raise err
self._ws.send(bb)
def instantiate_component(self, module, cname, id, args, kwargs, active_components):
# Maybe we still have the instance?
c = self.instances.get(id, None)
if c is not None and c._disposed is False:
return c
# Find the class
m = window.flexx.require(module)
Cls = m[cname] # noqa
# Instantiate. If given, replicate the active components by which the
# JsComponent was instantiated in Python.
kwargs['flx_session'] = self
kwargs['flx_id'] = id
active_components = active_components or []
for ac in active_components:
ac.__enter__()
try:
c = Cls(*args, **kwargs)
finally:
for ac in reversed(active_components):
ac.__exit__()
return c
def _register_component(self, c, id=None):
if self.app is None:
self.app = c # Set our root component; is the first to register
if id is None:
self._component_counter += 1
id = c.__name__ + '_' + str(self._component_counter) + 'js'
c._id = id
c._uid = self.id + '_' + id
self.instances[c._id] = c
def _unregister_component(self, c):
self.instances_to_check_size.pop(c.id, None)
pass # c gets popped from self.instances by DISPOSE_ACK command
def get_component_instance(self, id):
""" Get instance of a Component class, or None. Or the document body
if "body" is given.
"""
if id == 'body':
return window.document.body
else:
return self.instances.get(id, None)
def init_socket(self):
""" Make the connection to Python.
"""
# Check WebSocket support
WebSocket = window.WebSocket
if (WebSocket is undefined):
window.document.body.textContent = 'Browser does not support WebSockets'
raise "FAIL: need websocket"
# Construct ws url
if not self.ws_url:
proto = 'ws'
if window.location.protocol == 'https:':
proto = 'wss'
address = window.location.hostname
if window.location.port:
address += ':' + window.location.port
self.ws_url = '%s://%s/flexx/ws/%s' % (proto, address, self.app_name)
# Resolve public hostname
self.ws_url = self.ws_url.replace('0.0.0.0', window.location.hostname)
# Open web socket in binary mode
self._ws = ws = WebSocket(self.ws_url)
ws.binaryType = "arraybuffer"
self.status = 2
def on_ws_open(evt):
window.console.info('Socket opened with session id ' + self.id)
self.send_command('HI_FLEXX', self.id)
def on_ws_message(evt):
msg = evt.data # bsdf-encoded command
if not msg:
pass # ? drop glitchy message :/
elif self._pending_commands is None:
# Direct mode
self._receive_raw_command(msg)
else:
# Indirect mode, to give browser draw-time during loading
if len(self._pending_commands) == 0:
window.setTimeout(self._process_commands, 0)
self._pending_commands.push(msg)
def on_ws_close(evt):
self._ws = None
self.status = 0
msg = 'Lost connection with server'
if evt and evt.reason:
msg += ': %s (%i)' % (evt.reason, evt.code)
if not window.flexx.is_notebook:
# todo: show modal or cooky-like dialog instead of killing whole page
window.document.body.textContent = msg
else:
window.console.info(msg)
        def on_ws_error(evt):
self._ws = None
self.status = 0
window.console.error('Socket error')
# Connect
ws.onopen = on_ws_open
ws.onmessage = on_ws_message
ws.onclose = on_ws_close
ws.onerror = on_ws_error
def _process_commands(self):
""" A less direct way to process commands, which gives the
browser time to draw about every other JS asset. This is a
tradeoff between a smooth spinner and fast load time.
"""
while self._pending_commands is not None and len(self._pending_commands) > 0:
msg = self._pending_commands.pop(0)
try:
command = self._receive_raw_command(msg)
except Exception as err:
window.setTimeout(self._process_commands, 0)
raise err
if command[0] == 'DEFINE':
self._asset_count += 1
if (self._asset_count % 3) == 0:
if len(self._pending_commands):
window.setTimeout(self._process_commands, 0)
break
def _receive_raw_command(self, msg):
return self._receive_command(serializer.decode(msg))
def _receive_command(self, command):
""" Process a command send from the server.
"""
cmd = command[0]
if cmd == 'PING':
# Used for roundtrip stuff, do at least one iter loop here ...
window.setTimeout(self.send_command, 10, 'PONG', command[1])
elif cmd == 'INIT_DONE':
window.flexx.spin(None)
while len(self._pending_commands):
self._receive_raw_command(self._pending_commands.pop(0))
self._pending_commands = None
# print('init took', time() - self._init_time)
elif cmd == 'PRINT':
(window.console.ori_log or window.console.log)(command[1])
elif cmd == 'EXEC':
eval(command[1])
elif cmd == 'EVAL':
x = None
if len(command) == 2:
x = eval(command[1])
elif len(command) == 3:
x = eval('this.instances.' + command[1] + '.' + command[2])
console.log(str(x)) # print (and thus also sends back result)
elif cmd == 'EVALANDRETURN':
try:
x = eval(command[1])
except Exception as err:
x = str(err)
eval_id = command[2] # to identify the result in Python
self.send_command("EVALRESULT", x, eval_id)
elif cmd == 'INVOKE':
id, name, args = command[1:]
ob = self.instances.get(id, None)
if ob is None:
console.warn('Cannot invoke %s.%s; '
'session does not know it (anymore).' % (id, name))
elif ob._disposed is True:
                pass  # deleted, but other end might not be aware when command was sent
else:
ob[name](*args)
elif cmd == 'INSTANTIATE':
self.instantiate_component(*command[1:]) # module, cname, id, args, kwargs
elif cmd == 'DISPOSE':
id = command[1]
c = self.instances.get(id, None)
if c is not None and c._disposed is False: # else: no need to warn
c._dispose()
self.send_command('DISPOSE_ACK', command[1])
self.instances.pop(id, None) # Drop local reference now
elif cmd == 'DISPOSE_ACK':
self.instances.pop(command[1], None) # Drop reference
elif cmd == 'DEFINE':
#and command[1] == 'JS' or command[1] == 'DEFINE-JS-EVAL '):
kind, name, code = command[1:]
window.flexx.spin()
address = window.location.protocol + '//' + self.ws_url.split('/')[2]
code += '\n//# sourceURL=%s/flexx/assets/shared/%s\n' % (address, name)
if kind == 'JS-EVAL':
eval(code)
elif kind == 'JS':
# With this method, sourceURL does not work on Firefox,
# but eval might not work for assets that don't "use strict"
# (e.g. Bokeh). Note, btw, that creating links to assets does
# not work because these won't be loaded on time.
el = window.document.createElement("script")
el.id = name
el.innerHTML = code
window.flexx.asset_node.appendChild(el)
elif kind == 'CSS':
el = window.document.createElement("style")
el.type = "text/css"
el.id = name
el.innerHTML = code
window.flexx.asset_node.appendChild(el)
else:
window.console.error('Dont know how to DEFINE ' +
name + ' with "' + kind + '".')
elif cmd == 'OPEN':
window.win1 = window.open(command[1], 'new', 'chrome')
else:
window.console.error('Invalid command: "' + cmd + '"')
return command
def call_after_roundtrip(self, callback, *args):
ping_to_schedule_at = self._ping_counter + 1
if len(self._ping_calls) == 0 or self._ping_calls[-1][0] < ping_to_schedule_at:
window.setTimeout(self._send_ping, 0)
self._ping_calls.push((ping_to_schedule_at, callback, args))
def _send_ping(self):
self._ping_counter += 1
self.send_command('PING', self._ping_counter)
def _receive_pong(self, count):
while len(self._ping_calls) > 0 and self._ping_calls[0][0] <= count:
_, callback, args = self._ping_calls.pop(0)
window.setTimeout(callback, 0, *args)
def keep_checking_size_of(self, ob, check=True):
""" This is a service that the session provides.
"""
if check:
self.instances_to_check_size[ob.id] = ob
else:
self.instances_to_check_size.pop(ob.id, None)
def _check_size_of_objects(self):
for ob in self.instances_to_check_size.values():
if ob._disposed is False:
ob.check_real_size()
# In Python, we need some extras for the serializer to work
if this_is_js():
# Include bsdf.js
window.flexx = Flexx()
bsdf = RawJS("flexx.require('bsdf')")
serializer = bsdf.BsdfSerializer()
window.flexx.serializer = serializer
else:
# Import vendored bsdf lite module
from . import bsdf_lite as bsdf
serializer = bsdf.BsdfLiteSerializer()
serializer.__module__ = __name__
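# Illustrative note (not part of the module): the PING/PONG round trip used by
# JsSession.call_after_roundtrip above. Each scheduled callback is tagged with
# the next ping counter value; when the matching PONG comes back from Python,
# _receive_pong releases every callback whose tag is <= the returned counter.
# A minimal (hypothetical) usage sketch:
#
#     session.call_after_roundtrip(lambda: print('server processed everything sent so far'))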
|
import json
import logging
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import memcached_server
from perfkitbenchmarker.memcache_service import MemcacheService
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
ELASTICACHE_PORT = 11211
class ElastiCacheMemcacheService(MemcacheService):
"""Class for AWS elasticache memcache service."""
CLOUD = aws.CLOUD
def __init__(self, network, cluster_id, region, node_type, num_servers=1):
self.cluster_id = cluster_id
self.region = region
self.node_type = node_type
self.num_servers = num_servers
self.hosts = [] # [(ip, port)]
self.vpc_id = network.subnet.vpc_id
self.security_group_id = \
network.regional_network.vpc.default_security_group_id
self.subnet_id = network.subnet.id
self.subnet_group_name = '%ssubnet' % cluster_id
def Create(self):
# Open the port memcached needs
aws_network.AwsFirewall.GetFirewall() \
.AllowPortInSecurityGroup(self.region, self.security_group_id,
ELASTICACHE_PORT)
# Create a cache subnet group
cmd = ['aws', 'elasticache', 'create-cache-subnet-group',
'--region=%s' % self.region,
'--cache-subnet-group-name=%s' % self.subnet_group_name,
'--cache-subnet-group-description="PKB memcached_ycsb benchmark"',
'--subnet-ids=%s' % self.subnet_id]
vm_util.IssueCommand(cmd)
# Create the cluster
cmd = ['aws', 'elasticache', 'create-cache-cluster',
'--engine=memcached',
'--cache-subnet-group-name=%s' % self.subnet_group_name,
'--cache-cluster-id=%s' % self.cluster_id,
'--num-cache-nodes=%s' % self.num_servers,
'--region=%s' % self.region,
'--cache-node-type=%s' % self.node_type,
'--tags'] + util.MakeFormattedDefaultTags()
vm_util.IssueCommand(cmd)
# Wait for the cluster to come up
cluster_info = self._WaitForClusterUp()
# Parse out the hosts
self.hosts = \
[(node['Endpoint']['Address'], node['Endpoint']['Port'])
for node in cluster_info['CacheNodes']]
assert len(self.hosts) == self.num_servers
def Destroy(self):
# Delete the ElastiCache cluster
cmd = ['aws', 'elasticache', 'delete-cache-cluster',
'--cache-cluster-id=%s' % self.cluster_id,
'--region=%s' % self.region]
vm_util.IssueCommand(cmd, raise_on_failure=False)
# Don't have to delete the subnet group. It will be deleted with the subnet.
def Flush(self):
vm_util.RunThreaded(memcached_server.FlushMemcachedServer, self.hosts)
def GetHosts(self):
return ['%s:%s' % (ip, port) for ip, port in self.hosts]
def GetMetadata(self):
return {'num_servers': self.num_servers,
'elasticache_region': self.region,
'elasticache_node_type': self.node_type}
def _GetClusterInfo(self):
cmd = ['aws', 'elasticache', 'describe-cache-clusters']
cmd += ['--cache-cluster-id=%s' % self.cluster_id]
cmd += ['--region=%s' % self.region]
cmd += ['--show-cache-node-info']
out, _, _ = vm_util.IssueCommand(cmd)
return json.loads(out)['CacheClusters'][0]
@vm_util.Retry(poll_interval=15, timeout=300,
                 retryable_exceptions=(errors.Resource.RetryableCreationError,))
def _WaitForClusterUp(self):
"""Block until the ElastiCache memcached cluster is up.
    Times out after 5 minutes and raises an exception. Until the timeout
    expires, any exceptions are caught and the status check is retried.
    The cluster status is checked using the AWS CLI.
Returns:
The cluster info json as a dict
Raises:
      errors.Resource.RetryableCreationError: If the response is not as
        expected, or if there is an error connecting to the port or otherwise
        running the remote check command.
"""
logging.info('Trying to get ElastiCache cluster info for %s',
self.cluster_id)
cluster_status = None
try:
cluster_info = self._GetClusterInfo()
cluster_status = cluster_info['CacheClusterStatus']
if cluster_status == 'available':
logging.info('ElastiCache memcached cluster is up and running.')
return cluster_info
except errors.VirtualMachine.RemoteCommandError as e:
raise errors.Resource.RetryableCreationError(
'ElastiCache memcached cluster not up yet: %s.' % str(e))
else:
raise errors.Resource.RetryableCreationError(
'ElastiCache memcached cluster not up yet. Status: %s' %
cluster_status)
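# Illustrative usage sketch (not part of PKB): shows how this service is
# typically wired up. The fake network below only mimics the attributes the
# constructor reads; real runs pass the AWS network object PKB provides, and
# Create()/Destroy() would then drive the AWS CLI commands defined above.
if __name__ == '__main__':

  class _FakeSubnet(object):
    vpc_id = 'vpc-0123456789abcdef0'
    id = 'subnet-0123456789abcdef0'

  class _FakeVpc(object):
    default_security_group_id = 'sg-0123456789abcdef0'

  class _FakeRegionalNetwork(object):
    vpc = _FakeVpc()

  class _FakeNetwork(object):
    subnet = _FakeSubnet()
    regional_network = _FakeRegionalNetwork()

  service = ElastiCacheMemcacheService(
      _FakeNetwork(), cluster_id='pkb-memcached-demo', region='us-east-1',
      node_type='cache.m4.large', num_servers=2)
  print(service.GetMetadata())  # Metadata only; no AWS calls are made here.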
|
from ReText import globalSettings
from ReText.preview import ReTextWebPreview
from ReText.syncscroll import SyncScroll
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtGui import QDesktopServices, QGuiApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView, QWebEngineSettings
class ReTextWebEnginePage(QWebEnginePage):
def __init__(self, parent, tab):
QWebEnginePage.__init__(self, parent)
self.tab = tab
def setScrollPosition(self, pos):
self.runJavaScript("window.scrollTo(%s, %s);" % (pos.x(), pos.y()))
def getPositionMap(self, callback):
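        # Collect the page offsets of all elements carrying a data-posmap
        # attribute and deliver them asynchronously to `callback` as an
        # {int position: pixel offset} mapping used for synchronized scrolling.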
def resultCallback(result):
if result:
return callback({int(a): b for a, b in result.items()})
script = """
var elements = document.querySelectorAll('[data-posmap]');
var result = {};
var bodyTop = document.body.getBoundingClientRect().top;
for (var i = 0; i < elements.length; ++i) {
var element = elements[i];
value = element.getAttribute('data-posmap');
bottom = element.getBoundingClientRect().bottom - bodyTop;
result[value] = bottom;
}
result;
"""
self.runJavaScript(script, resultCallback)
def javaScriptConsoleMessage(self, level, message, lineNumber, sourceId):
print("level=%r message=%r lineNumber=%r sourceId=%r" % (level, message, lineNumber, sourceId))
def acceptNavigationRequest(self, url, type, isMainFrame):
if url.scheme() == "data":
return True
if url.isLocalFile():
localFile = url.toLocalFile()
if localFile == self.tab.fileName:
self.tab.startPendingConversion()
return False
if self.tab.openSourceFile(localFile):
return False
if globalSettings.handleWebLinks:
return True
QDesktopServices.openUrl(url)
return False
class ReTextWebEnginePreview(ReTextWebPreview, QWebEngineView):
def __init__(self, tab,
editorPositionToSourceLineFunc,
sourceLineToEditorPositionFunc):
QWebEngineView.__init__(self, parent=tab)
webPage = ReTextWebEnginePage(self, tab)
self.setPage(webPage)
self.syncscroll = SyncScroll(webPage,
editorPositionToSourceLineFunc,
sourceLineToEditorPositionFunc)
ReTextWebPreview.__init__(self, tab.editBox)
settings = self.settings()
settings.setAttribute(QWebEngineSettings.LocalContentCanAccessFileUrls,
False)
def updateFontSettings(self):
settings = self.settings()
settings.setFontFamily(QWebEngineSettings.StandardFont,
globalSettings.font.family())
settings.setFontSize(QWebEngineSettings.DefaultFontSize,
globalSettings.font.pointSize())
def setHtml(self, html, baseUrl):
# A hack to prevent WebEngine from stealing the focus
self.setEnabled(False)
QWebEngineView.setHtml(self, html, baseUrl)
self.setEnabled(True)
def _handleWheelEvent(self, event):
# Only pass wheelEvents on to the preview if syncscroll is
# controlling the position of the preview
if self.syncscroll.isActive():
QGuiApplication.sendEvent(self.focusProxy(), event)
def event(self, event):
# Work-around https://bugreports.qt.io/browse/QTBUG-43602
if event.type() == QEvent.ChildAdded:
event.child().installEventFilter(self)
elif event.type() == QEvent.ChildRemoved:
event.child().removeEventFilter(self)
return super().event(event)
def eventFilter(self, object, event):
if event.type() == QEvent.Wheel:
if QGuiApplication.keyboardModifiers() == Qt.ControlModifier:
self.wheelEvent(event)
return True
return False
|
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareZoneException,
)
from homeassistant.components.cloudflare.const import CONF_RECORDS, DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_API_TOKEN, CONF_SOURCE, CONF_ZONE
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.setup import async_setup_component
from . import (
ENTRY_CONFIG,
USER_INPUT,
USER_INPUT_RECORDS,
USER_INPUT_ZONE,
_patch_async_setup,
_patch_async_setup_entry,
)
from tests.common import MockConfigEntry
async def test_user_form(hass, cfupdate_flow):
"""Test we get the user initiated form."""
await async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "zone"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT_ZONE,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "records"
assert result["errors"] == {}
with _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT_RECORDS,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT_ZONE[CONF_ZONE]
assert result["data"]
assert result["data"][CONF_API_TOKEN] == USER_INPUT[CONF_API_TOKEN]
assert result["data"][CONF_ZONE] == USER_INPUT_ZONE[CONF_ZONE]
assert result["data"][CONF_RECORDS] == USER_INPUT_RECORDS[CONF_RECORDS]
assert result["result"]
assert result["result"].unique_id == USER_INPUT_ZONE[CONF_ZONE]
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_cannot_connect(hass, cfupdate_flow):
"""Test we handle cannot connect error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareConnectionException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_user_form_invalid_auth(hass, cfupdate_flow):
"""Test we handle invalid auth error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareAuthenticationException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
async def test_user_form_invalid_zone(hass, cfupdate_flow):
"""Test we handle invalid zone error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareZoneException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_zone"}
async def test_user_form_unexpected_exception(hass, cfupdate_flow):
"""Test we handle unexpected exception."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = Exception()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
async def test_user_form_single_instance_allowed(hass):
"""Test that configuring more than one instance is rejected."""
entry = MockConfigEntry(domain=DOMAIN, data=ENTRY_CONFIG)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=USER_INPUT,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
|
import pickle
import sys
import unittest
from absl import flags
from absl.testing import flagsaver
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import providers
from perfkitbenchmarker.configs import benchmark_config_spec
from tests import pkb_common_test_case
from perfkitbenchmarker.vpn_service import TunnelConfig
FLAGS = flags.FLAGS
PROJECT = 'mock_project'
CLOUD = providers.GCP
BENCHMARK_NAME = 'iperf'
URI = 'uri45678'
DEFAULT_CFG = """
# VPN iperf config.
iperf:
description: Run iperf over vpn
flags:
iperf_sending_thread_count: 5
use_vpn: True
vpn_service_gateway_count: 1
vpn_service:
tunnel_count: 1
ike_version: 2
routing_type: static
vm_groups:
vm_1:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 192.168.1.0/24
vm_spec:
GCP:
zone: us-central1-c
machine_type: n1-standard-4
"""
class BaseVPNServiceTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(BaseVPNServiceTest, self).setUp()
if not sys.warnoptions: # https://bugs.python.org/issue33154
import warnings
warnings.simplefilter('ignore', ResourceWarning)
def _CreateBenchmarkSpecFromYaml(self, yaml_string,
benchmark_name=BENCHMARK_NAME):
config = configs.LoadConfig(yaml_string, {}, benchmark_name)
return self._CreateBenchmarkSpecFromConfigDict(config, benchmark_name)
def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
config_spec = benchmark_config_spec.BenchmarkConfigSpec(benchmark_name,
flag_values=FLAGS,
**config_dict)
benchmark_module = next((b for b in linux_benchmarks.BENCHMARKS if
b.BENCHMARK_NAME == benchmark_name))
return benchmark_spec.BenchmarkSpec(benchmark_module, config_spec, URI)
def extractDictAFromB(self, A, B): # assertDictContainsSubset deprecated
    return {k: B[k] for k in A if k in B}
class VpnServiceTestCase(BaseVPNServiceTest):
@flagsaver.flagsaver(use_vpn=True, vpn_service_gateway_count=1)
def testVpnServiceConfig(self):
spec = self._CreateBenchmarkSpecFromYaml(DEFAULT_CFG)
spec.ConstructVPNService()
# test global flags
self.assertTrue(spec.config.flags['use_vpn'])
self.assertEqual(spec.config.flags['vpn_service_gateway_count'], 1)
# test vpn_service flags
self.assertTrue(hasattr(spec, 'vpn_service'))
self.assertIsNot(spec.vpn_service, None)
self.assertEqual(spec.vpn_service.tunnel_count, 1)
self.assertEqual(spec.vpn_service.ike_version, 2)
self.assertEqual(spec.vpn_service.routing, 'static')
# test benchmark_spec attributes
self.assertTrue(hasattr(spec, 'vpn_gateways'))
self.assertIsNot(spec.vpn_gateways, None)
# test unpickled values for above
pspec = pickle.loads(pickle.dumps(spec))
self.assertTrue(pspec.config.flags['use_vpn'])
self.assertEqual(pspec.config.flags['vpn_service_gateway_count'], 1)
self.assertTrue(hasattr(pspec, 'vpn_service'))
self.assertIsNot(pspec.vpn_service, None)
self.assertEqual(pspec.vpn_service.tunnel_count, 1)
self.assertEqual(pspec.vpn_service.ike_version, 2)
self.assertEqual(pspec.vpn_service.routing, 'static')
self.assertTrue(hasattr(pspec, 'vpn_gateways'))
self.assertIsNot(pspec.vpn_gateways, None)
@flagsaver.flagsaver(use_vpn=True, vpn_service_gateway_count=1)
def testGetVPNGatewayPairs(self):
vpn_gateways = {
'vpngw-us-west1-0-None': None,
'vpngw-us-west1-1-None': None,
'vpngw-us-central1-0-None': None,
'vpngw-us-central1-1-None': None,
}
spec = self._CreateBenchmarkSpecFromYaml(DEFAULT_CFG)
spec.ConstructVPNService()
pairs = spec.vpn_service.GetVpnGatewayPairs(vpn_gateways)
self.assertEqual(len(pairs), 4)
# test unpickled values
pspec = pickle.loads(pickle.dumps(spec))
ppairs = pspec.vpn_service.GetVpnGatewayPairs(vpn_gateways)
self.assertEqual(len(ppairs), 4)
class TunnelConfigTestCase(BaseVPNServiceTest):
@flagsaver.flagsaver(run_uri=URI)
def testTunnelConfigHash(self):
ep1 = {
'name': 'ep1',
'ip': '1.2.3.4',
'cidr': '1.2.3.4/5',
'require_target_to_init': False,
'tunnel_id': '12345',
}
ep2 = {
'name': 'ep2',
'ip': '9.8.7.6',
'cidr': '9.8.7.6/5',
'require_target_to_init': False,
'tunnel_id': '98765',
}
endpoints = [ep1, ep2]
conf = {
'tunnel_name': 'tun1',
'ike_version': '3',
'routing': 'static',
'psk': 'private',
'endpoints': endpoints
}
tunnel_config = TunnelConfig()
tunnel_config2 = TunnelConfig()
hash1 = tunnel_config.hash()
hash2 = tunnel_config2.hash()
self.assertEqual(hash1, hash2)
tunnel_config.setConfig(**conf)
hash3 = tunnel_config.hash()
self.assertNotEqual(hash1, hash3)
if __name__ == '__main__':
unittest.main()
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Remote states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from . import XiaomiDevice
from .const import DOMAIN, GATEWAYS_KEY
ATTR_CURTAIN_LEVEL = "curtain_level"
DATA_KEY_PROTO_V1 = "status"
DATA_KEY_PROTO_V2 = "curtain_status"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Perform the setup for Xiaomi devices."""
entities = []
gateway = hass.data[DOMAIN][GATEWAYS_KEY][config_entry.entry_id]
for device in gateway.devices["cover"]:
model = device["model"]
if model in ["curtain", "curtain.aq2", "curtain.hagl04"]:
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = DATA_KEY_PROTO_V1
else:
data_key = DATA_KEY_PROTO_V2
entities.append(
XiaomiGenericCover(device, "Curtain", data_key, gateway, config_entry)
)
async_add_entities(entities)
class XiaomiGenericCover(XiaomiDevice, CoverEntity):
"""Representation of a XiaomiGenericCover."""
def __init__(self, device, name, data_key, xiaomi_hub, config_entry):
"""Initialize the XiaomiGenericCover."""
self._data_key = data_key
self._pos = 0
super().__init__(device, name, xiaomi_hub, config_entry)
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._pos
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.current_cover_position <= 0
def close_cover(self, **kwargs):
"""Close the cover."""
self._write_to_hub(self._sid, **{self._data_key: "close"})
def open_cover(self, **kwargs):
"""Open the cover."""
self._write_to_hub(self._sid, **{self._data_key: "open"})
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._write_to_hub(self._sid, **{self._data_key: "stop"})
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs.get(ATTR_POSITION)
if self._data_key == DATA_KEY_PROTO_V2:
self._write_to_hub(self._sid, **{ATTR_CURTAIN_LEVEL: position})
else:
self._write_to_hub(self._sid, **{ATTR_CURTAIN_LEVEL: str(position)})
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if ATTR_CURTAIN_LEVEL in data:
self._pos = int(data[ATTR_CURTAIN_LEVEL])
return True
return False
|
import numpy as np
class BacktestDataHandler(object):
"""
"""
def __init__(
self,
universe,
data_sources=None
):
self.universe = universe
self.data_sources = data_sources
def get_asset_latest_bid_price(self, dt, asset_symbol):
"""
"""
# TODO: Check for asset in Universe
bid = np.NaN
for ds in self.data_sources:
try:
bid = ds.get_bid(dt, asset_symbol)
if not np.isnan(bid):
return bid
except Exception:
bid = np.NaN
return bid
def get_asset_latest_ask_price(self, dt, asset_symbol):
"""
"""
# TODO: Check for asset in Universe
ask = np.NaN
for ds in self.data_sources:
try:
ask = ds.get_ask(dt, asset_symbol)
if not np.isnan(ask):
return ask
except Exception:
ask = np.NaN
return ask
def get_asset_latest_bid_ask_price(self, dt, asset_symbol):
"""
"""
# TODO: For the moment this is sufficient for OHLCV
# data, which only usually provides mid prices
# This will need to be revisited when handling intraday
# bid/ask time series.
# It has been added as an optimisation mechanism for
# interday backtests.
bid = self.get_asset_latest_bid_price(dt, asset_symbol)
return (bid, bid)
def get_asset_latest_mid_price(self, dt, asset_symbol):
"""
"""
bid_ask = self.get_asset_latest_bid_ask_price(dt, asset_symbol)
try:
mid = (bid_ask[0] + bid_ask[1]) / 2.0
except Exception:
# TODO: Log this
mid = np.NaN
return mid
def get_assets_historical_range_close_price(
self, start_dt, end_dt, asset_symbols, adjusted=False
):
"""
"""
prices_df = None
for ds in self.data_sources:
try:
prices_df = ds.get_assets_historical_closes(
start_dt, end_dt, asset_symbols, adjusted=adjusted
)
if prices_df is not None:
return prices_df
except Exception:
raise
return prices_df
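# Illustrative usage sketch (not part of the library): a minimal in-memory data
# source exposing the get_bid/get_ask interface the handler expects, to show
# the fall-through behaviour of the price lookups above.
if __name__ == "__main__":

    class StaticDataSource(object):
        def get_bid(self, dt, asset_symbol):
            return 100.0

        def get_ask(self, dt, asset_symbol):
            return 100.2

    handler = BacktestDataHandler(None, data_sources=[StaticDataSource()])
    # The mid price is derived from get_asset_latest_bid_ask_price(), which for
    # now returns the bid twice, so this prints 100.0 rather than 100.1.
    print(handler.get_asset_latest_mid_price(None, "EQ:SPY"))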
|
from typing import Any, Dict, List, Optional
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DOMAIN, IncomfortChild
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up an InComfort/InTouch climate device."""
if discovery_info is None:
return
client = hass.data[DOMAIN]["client"]
heaters = hass.data[DOMAIN]["heaters"]
async_add_entities(
[InComfortClimate(client, h, r) for h in heaters for r in h.rooms]
)
class InComfortClimate(IncomfortChild, ClimateEntity):
"""Representation of an InComfort/InTouch climate device."""
def __init__(self, client, heater, room) -> None:
"""Initialize the climate device."""
super().__init__()
self._unique_id = f"{heater.serial_no}_{room.room_no}"
self.entity_id = f"{CLIMATE_DOMAIN}.{DOMAIN}_{room.room_no}"
self._name = f"Thermostat {room.room_no}"
self._client = client
self._room = room
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
return {"status": self._room.status}
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_HEAT]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._room.room_temp
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self._room.setpoint
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def min_temp(self) -> float:
"""Return max valid temperature that can be set."""
return 5.0
@property
def max_temp(self) -> float:
"""Return max valid temperature that can be set."""
return 30.0
async def async_set_temperature(self, **kwargs) -> None:
"""Set a new target temperature for this zone."""
temperature = kwargs.get(ATTR_TEMPERATURE)
await self._room.set_override(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
|
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, Tuple, Union, cast
from redbot import VersionInfo, version_info as red_version_info
from . import installable
from .log import log
if TYPE_CHECKING:
from .json_mixins import RepoJSONMixin
__all__ = ("REPO_SCHEMA", "INSTALLABLE_SCHEMA", "update_mixin")
class UseDefault:
"""To be used as sentinel."""
# sentinel value
USE_DEFAULT = UseDefault()
def ensure_tuple_of_str(
info_file: Path, key_name: str, value: Union[Any, UseDefault]
) -> Tuple[str, ...]:
default: Tuple[str, ...] = ()
if value is USE_DEFAULT:
return default
if not isinstance(value, list):
log.warning(
"Invalid value of '%s' key (expected list, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
for item in value:
if not isinstance(item, str):
log.warning(
"Invalid item in '%s' list (expected str, got %s)"
" in JSON information file at path: %s",
key_name,
type(item).__name__,
info_file,
)
return default
return tuple(value)
def ensure_str(info_file: Path, key_name: str, value: Union[Any, UseDefault]) -> str:
default = ""
if value is USE_DEFAULT:
return default
if not isinstance(value, str):
log.warning(
"Invalid value of '%s' key (expected str, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
return value
def ensure_red_version_info(
info_file: Path, key_name: str, value: Union[Any, UseDefault]
) -> VersionInfo:
default = red_version_info
if value is USE_DEFAULT:
return default
if not isinstance(value, str):
log.warning(
"Invalid value of '%s' key (expected str, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
try:
version_info = VersionInfo.from_str(value)
except ValueError:
log.warning(
"Invalid value of '%s' key (given value isn't a valid version string)"
" in JSON information file at path: %s",
key_name,
info_file,
)
return default
return version_info
def ensure_python_version_info(
info_file: Path, key_name: str, value: Union[Any, UseDefault]
) -> Tuple[int, int, int]:
default = (3, 5, 1)
if value is USE_DEFAULT:
return default
if not isinstance(value, list):
log.warning(
"Invalid value of '%s' key (expected list, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
count = len(value)
if count != 3:
log.warning(
"Invalid value of '%s' key (expected list with 3 items, got %s items)"
" in JSON information file at path: %s",
key_name,
count,
info_file,
)
return default
for item in value:
if not isinstance(item, int):
log.warning(
"Invalid item in '%s' list (expected int, got %s)"
" in JSON information file at path: %s",
key_name,
type(item).__name__,
info_file,
)
return default
return cast(Tuple[int, int, int], tuple(value))
def ensure_bool(
info_file: Path, key_name: str, value: Union[Any, UseDefault], *, default: bool = False
) -> bool:
if value is USE_DEFAULT:
return default
if not isinstance(value, bool):
log.warning(
"Invalid value of '%s' key (expected bool, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
return value
def ensure_required_cogs_mapping(
info_file: Path, key_name: str, value: Union[Any, UseDefault]
) -> Dict[str, str]:
default: Dict[str, str] = {}
if value is USE_DEFAULT:
return default
if not isinstance(value, dict):
log.warning(
"Invalid value of '%s' key (expected dict, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default
# keys in json dicts are always strings
for item in value.values():
if not isinstance(item, str):
log.warning(
"Invalid item in '%s' dict (expected str, got %s)"
" in JSON information file at path: %s",
key_name,
type(item).__name__,
info_file,
)
return default
return value
def ensure_installable_type(
info_file: Path, key_name: str, value: Union[Any, UseDefault]
) -> installable.InstallableType:
default = installable.InstallableType.COG
if value is USE_DEFAULT:
return default
if not isinstance(value, str):
log.warning(
"Invalid value of '%s' key (expected str, got %s)"
" in JSON information file at path: %s",
key_name,
type(value).__name__,
info_file,
)
return default # NOTE: old behavior was to use InstallableType.UNKNOWN
if value in ("", "COG"):
return installable.InstallableType.COG
if value == "SHARED_LIBRARY":
return installable.InstallableType.SHARED_LIBRARY
return installable.InstallableType.UNKNOWN
EnsureCallable = Callable[[Path, str, Union[Any, UseDefault]], Any]
SchemaType = Dict[str, EnsureCallable]
REPO_SCHEMA: SchemaType = {
"author": ensure_tuple_of_str,
"description": ensure_str,
"install_msg": ensure_str,
"short": ensure_str,
}
INSTALLABLE_SCHEMA: SchemaType = {
"min_bot_version": ensure_red_version_info,
"max_bot_version": ensure_red_version_info,
"min_python_version": ensure_python_version_info,
"hidden": ensure_bool,
"disabled": ensure_bool,
"required_cogs": ensure_required_cogs_mapping,
"requirements": ensure_tuple_of_str,
"tags": ensure_tuple_of_str,
"type": ensure_installable_type,
"end_user_data_statement": ensure_str,
}
def update_mixin(repo_or_installable: RepoJSONMixin, schema: SchemaType) -> None:
info = repo_or_installable._info
info_file = repo_or_installable._info_file
for key, callback in schema.items():
setattr(repo_or_installable, key, callback(info_file, key, info.get(key, USE_DEFAULT)))
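# Illustrative sketch (not part of Red): update_mixin() only relies on the
# `_info` and `_info_file` attributes of the object it is given, so a bare
# stand-in object is enough to show how the schema callbacks coerce values and
# fall back to defaults for missing keys. Run with ``python -m`` so the
# relative imports above resolve.
if __name__ == "__main__":

    class _FakeInstallable:
        _info_file = Path("info.json")
        _info = {"hidden": True, "min_python_version": [3, 8, 1], "tags": ["tools"]}

    obj = _FakeInstallable()
    update_mixin(obj, INSTALLABLE_SCHEMA)
    # Prints: True (3, 8, 1) ('tools',) InstallableType.COG
    print(obj.hidden, obj.min_python_version, obj.tags, obj.type)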
|
import logging
from homeassistant.const import PERCENTAGE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from . import (
ATTR_ABV,
ATTR_BATCH_VOLUME,
ATTR_BPM,
ATTR_CO2_VOLUME,
ATTR_TEMP,
ATTR_TEMP_UNIT,
ATTR_VOLUME_UNIT,
DOMAIN as PLAATO_DOMAIN,
PLAATO_DEVICE_ATTRS,
PLAATO_DEVICE_SENSORS,
SENSOR_DATA_KEY,
SENSOR_UPDATE,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Plaato sensor."""
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Plaato from a config entry."""
devices = {}
def get_device(device_id):
"""Get a device."""
return hass.data[PLAATO_DOMAIN].get(device_id, False)
def get_device_sensors(device_id):
"""Get device sensors."""
return hass.data[PLAATO_DOMAIN].get(device_id).get(PLAATO_DEVICE_SENSORS)
async def _update_sensor(device_id):
"""Update/Create the sensors."""
if device_id not in devices and get_device(device_id):
entities = []
sensors = get_device_sensors(device_id)
for sensor_type in sensors:
entities.append(PlaatoSensor(device_id, sensor_type))
devices[device_id] = entities
async_add_entities(entities, True)
else:
for entity in devices[device_id]:
async_dispatcher_send(hass, f"{PLAATO_DOMAIN}_{entity.unique_id}")
hass.data[SENSOR_DATA_KEY] = async_dispatcher_connect(
hass, SENSOR_UPDATE, _update_sensor
)
return True
class PlaatoSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, device_id, sensor_type):
"""Initialize the sensor."""
self._device_id = device_id
self._type = sensor_type
self._state = 0
self._name = f"{device_id} {sensor_type}"
self._attributes = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{PLAATO_DOMAIN} {self._name}"
@property
def unique_id(self):
"""Return the unique ID of this sensor."""
return f"{self._device_id}_{self._type}"
@property
def device_info(self):
"""Get device info."""
return {
"identifiers": {(PLAATO_DOMAIN, self._device_id)},
"name": self._device_id,
"manufacturer": "Plaato",
"model": "Airlock",
}
def get_sensors(self):
"""Get device sensors."""
return (
self.hass.data[PLAATO_DOMAIN]
.get(self._device_id)
.get(PLAATO_DEVICE_SENSORS, False)
)
def get_sensors_unit_of_measurement(self, sensor_type):
"""Get unit of measurement for sensor of type."""
return (
self.hass.data[PLAATO_DOMAIN]
.get(self._device_id)
.get(PLAATO_DEVICE_ATTRS, [])
.get(sensor_type, "")
)
@property
def state(self):
"""Return the state of the sensor."""
sensors = self.get_sensors()
if sensors is False:
_LOGGER.debug("Device with name %s has no sensors", self.name)
return 0
if self._type == ATTR_ABV:
return round(sensors.get(self._type), 2)
if self._type == ATTR_TEMP:
return round(sensors.get(self._type), 1)
if self._type == ATTR_CO2_VOLUME:
return round(sensors.get(self._type), 2)
return sensors.get(self._type)
@property
def device_state_attributes(self):
"""Return the state attributes of the monitored installation."""
if self._attributes is not None:
return self._attributes
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self._type == ATTR_TEMP:
return self.get_sensors_unit_of_measurement(ATTR_TEMP_UNIT)
if self._type == ATTR_BATCH_VOLUME or self._type == ATTR_CO2_VOLUME:
return self.get_sensors_unit_of_measurement(ATTR_VOLUME_UNIT)
if self._type == ATTR_BPM:
return "bpm"
if self._type == ATTR_ABV:
return PERCENTAGE
return ""
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
f"{PLAATO_DOMAIN}_{self.unique_id}", self.async_write_ha_state
)
)
|
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_ecobee_occupancy_setup(hass):
"""Test that an Ecbobee occupancy sensor be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "ecobee_occupancy.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
sensor = entity_registry.async_get("binary_sensor.master_fan")
assert sensor.unique_id == "homekit-111111111111-56"
sensor_helper = Helper(
hass, "binary_sensor.master_fan", pairing, accessories[0], config_entry
)
sensor_state = await sensor_helper.poll_and_get_state()
assert sensor_state.attributes["friendly_name"] == "Master Fan"
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(sensor.device_id)
assert device.manufacturer == "ecobee Inc."
assert device.name == "Master Fan"
assert device.model == "ecobee Switch+"
assert device.sw_version == "4.5.130201"
assert device.via_device_id is None
|
import logging
from pylutron import Button, Lutron
import voluptuous as vol
from homeassistant.const import ATTR_ID, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
DOMAIN = "lutron"
_LOGGER = logging.getLogger(__name__)
LUTRON_BUTTONS = "lutron_buttons"
LUTRON_CONTROLLER = "lutron_controller"
LUTRON_DEVICES = "lutron_devices"
# Attribute on events that indicates what action was taken with the button.
ATTR_ACTION = "action"
ATTR_FULL_ID = "full_id"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, base_config):
"""Set up the Lutron component."""
hass.data[LUTRON_BUTTONS] = []
hass.data[LUTRON_CONTROLLER] = None
hass.data[LUTRON_DEVICES] = {
"light": [],
"cover": [],
"switch": [],
"scene": [],
"binary_sensor": [],
}
config = base_config.get(DOMAIN)
hass.data[LUTRON_CONTROLLER] = Lutron(
config[CONF_HOST], config[CONF_USERNAME], config[CONF_PASSWORD]
)
hass.data[LUTRON_CONTROLLER].load_xml_db()
hass.data[LUTRON_CONTROLLER].connect()
_LOGGER.info("Connected to main repeater at %s", config[CONF_HOST])
# Sort our devices into types
for area in hass.data[LUTRON_CONTROLLER].areas:
for output in area.outputs:
if output.type == "SYSTEM_SHADE":
hass.data[LUTRON_DEVICES]["cover"].append((area.name, output))
elif output.is_dimmable:
hass.data[LUTRON_DEVICES]["light"].append((area.name, output))
else:
hass.data[LUTRON_DEVICES]["switch"].append((area.name, output))
for keypad in area.keypads:
for button in keypad.buttons:
# If the button has a function assigned to it, add it as a scene
if button.name != "Unknown Button" and button.button_type in (
"SingleAction",
"Toggle",
"SingleSceneRaiseLower",
"MasterRaiseLower",
):
# Associate an LED with a button if there is one
led = next(
(led for led in keypad.leds if led.number == button.number),
None,
)
hass.data[LUTRON_DEVICES]["scene"].append(
(area.name, keypad.name, button, led)
)
hass.data[LUTRON_BUTTONS].append(
LutronButton(hass, area.name, keypad, button)
)
if area.occupancy_group is not None:
hass.data[LUTRON_DEVICES]["binary_sensor"].append(
(area.name, area.occupancy_group)
)
for component in ("light", "cover", "switch", "scene", "binary_sensor"):
discovery.load_platform(hass, component, DOMAIN, {}, base_config)
return True
class LutronDevice(Entity):
"""Representation of a Lutron device entity."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the device."""
self._lutron_device = lutron_device
self._controller = controller
self._area_name = area_name
async def async_added_to_hass(self):
"""Register callbacks."""
self.hass.async_add_executor_job(
self._lutron_device.subscribe, self._update_callback, None
)
def _update_callback(self, _device, _context, _event, _params):
"""Run when invoked by pylutron when the device state changes."""
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the device."""
return f"{self._area_name} {self._lutron_device.name}"
@property
def should_poll(self):
"""No polling needed."""
return False
class LutronButton:
"""Representation of a button on a Lutron keypad.
This is responsible for firing events as keypad buttons are pressed
(and possibly released, depending on the button type). It is not
represented as an entity; it simply fires events.
"""
def __init__(self, hass, area_name, keypad, button):
"""Register callback for activity on the button."""
name = f"{keypad.name}: {button.name}"
self._hass = hass
self._has_release_event = (
button.button_type is not None and "RaiseLower" in button.button_type
)
self._id = slugify(name)
self._keypad = keypad
self._area_name = area_name
self._button_name = button.name
self._button = button
self._event = "lutron_event"
self._full_id = slugify(f"{area_name} {keypad.name}: {button.name}")
button.subscribe(self.button_callback, None)
def button_callback(self, button, context, event, params):
"""Fire an event about a button being pressed or released."""
# Events per button type:
# RaiseLower -> pressed/released
# SingleAction -> single
action = None
if self._has_release_event:
if event == Button.Event.PRESSED:
action = "pressed"
else:
action = "released"
elif event == Button.Event.PRESSED:
action = "single"
if action:
data = {ATTR_ID: self._id, ATTR_ACTION: action, ATTR_FULL_ID: self._full_id}
self._hass.bus.fire(self._event, data)
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from weblate.formats.models import EXPORTERS
from weblate.glossary.forms import (
GlossaryForm,
GlossaryUploadForm,
LetterForm,
OneTermForm,
TermForm,
)
from weblate.glossary.models import Glossary, Term
from weblate.lang.models import Language
from weblate.trans.models import Change, Unit
from weblate.trans.util import redirect_next, render, sort_objects
from weblate.utils import messages
from weblate.utils.errors import report_error
from weblate.utils.ratelimit import session_ratelimit_post
from weblate.utils.site import get_site_url
from weblate.utils.views import get_paginator, get_project, import_message
EXPORT_TYPES = ("csv", "po", "tbx", "xliff")
def dict_title(prj, lang):
"""Return glossary title."""
return _("%(language)s glossary for %(project)s") % {
"language": lang,
"project": prj,
}
class LanguageGlossary:
"""Wrapper object for listing glossaries per language."""
def __init__(self, project, post_data, user):
self.project = project
        # for_project() can return duplicates, which we need to avoid
        # for the annotate query below.
glossary_ids = set(
Glossary.objects.for_project(project).values_list("id", flat=True)
)
self.glossaries = Glossary.objects.filter(pk__in=glossary_ids).order_by("name")
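        # Map (language id, glossary id) -> term count, so per-language term
        # counts can be read in get_glossaries() without extra queries.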
self.data = {
(item["term__language"], item["pk"]): item["term__count"]
for item in self.glossaries.values("term__language", "pk").annotate(
Count("term")
)
}
try:
self.post_id = int(post_data.get("edit_glossary", -1))
except ValueError:
self.post_id = -1
self.languages = sort_objects(
Language.objects.filter(translation__component__project=project).distinct()
)
self.forms = {
glossary.id: GlossaryForm(
user,
glossary.project,
post_data if self.post_id == glossary.id else None,
instance=glossary,
auto_id=f"id_edit_{glossary.id}_%s",
)
for glossary in self.glossaries
}
def get_edited_glossary(self):
return self.glossaries.get(pk=self.post_id)
def get_glossaries(self):
for glossary in self.glossaries:
glossaries = [
{
"language": language,
"count": self.data.get((language.pk, glossary.pk), 0),
}
for language in self.languages
]
yield {
"glossary": glossary,
"form": self.forms[glossary.id],
"count": sum(g["count"] for g in glossaries),
"glossaries": glossaries,
}
@never_cache
def show_glossaries(request, project):
obj = get_project(request, project)
language_glossaries = LanguageGlossary(obj, request.POST, request.user)
try:
initial = {"source_language": obj.component_set.all()[0].source_language}
except IndexError:
initial = {}
new_form = GlossaryForm(request.user, obj, initial=initial)
if request.method == "POST" and request.user.has_perm("project.edit", obj):
if "delete_glossary" in request.POST:
try:
glossary = language_glossaries.glossaries.get(
pk=int(request.POST["delete_glossary"])
)
glossary.delete()
return redirect("show_glossaries", project=obj.slug)
except (Glossary.DoesNotExist, ValueError):
messages.error(request, _("Glossary was not found."))
elif language_glossaries.post_id == -1:
new_form = GlossaryForm(request.user, obj, data=request.POST)
if new_form.is_valid():
new_form.instance.project = obj
new_form.save()
return redirect("show_glossaries", project=obj.slug)
else:
try:
glossary = language_glossaries.get_edited_glossary()
form = language_glossaries.forms[glossary.id]
if form.is_valid():
form.save()
return redirect("show_glossaries", project=obj.slug)
except Glossary.DoesNotExist:
messages.error(request, _("Glossary was not found."))
return render(
request,
"glossaries.html",
{
"title": _("Glossaries"),
"object": obj,
"language_glossaries": language_glossaries.get_glossaries(),
"project": obj,
"new_form": new_form,
},
)
@never_cache
def edit_glossary(request, pk):
term = get_object_or_404(Term, id=pk)
if not term.check_perm(request.user, "glossary.edit"):
raise PermissionDenied()
if request.method == "POST":
form = TermForm(term.glossary.project, data=request.POST, instance=term)
if form.is_valid():
term.edit(
request,
form.cleaned_data["source"],
form.cleaned_data["target"],
form.cleaned_data["glossary"],
)
return redirect_next(
request.POST.get("next"),
reverse(
"show_glossary",
kwargs={
"project": term.glossary.project.slug,
"lang": term.language.code,
},
),
)
else:
form = TermForm(term.glossary.project, instance=term)
last_changes = Change.objects.last_changes(request.user).filter(glossary_term=term)[
:10
]
return render(
request,
"edit_glossary.html",
{
"title": dict_title(term.glossary, term.language),
"project": term.glossary.project,
"language": term.language,
"form": form,
"next": request.POST.get("next") or request.GET.get("next"),
"last_changes": last_changes,
"last_changes_url": urlencode(
(
("project", term.glossary.project.slug),
("lang", term.language.code),
("action", Change.ACTION_DICTIONARY_NEW),
("action", Change.ACTION_DICTIONARY_EDIT),
("action", Change.ACTION_DICTIONARY_UPLOAD),
)
),
},
)
@require_POST
@login_required
def delete_glossary(request, pk):
term = get_object_or_404(Term, id=pk)
if not term.check_perm(request.user, "glossary.delete"):
raise PermissionDenied()
term.delete()
return redirect_next(
request.POST.get("next"),
reverse(
"show_glossary",
kwargs={"project": term.glossary.project.slug, "lang": term.language.code},
),
)
@require_POST
@login_required
@session_ratelimit_post("glossary")
def upload_glossary(request, project, lang):
prj = get_project(request, project)
if not request.user.has_perm("glossary.upload", prj):
raise PermissionDenied()
lang = get_object_or_404(Language, code=lang)
form = GlossaryUploadForm(prj, request.POST, request.FILES)
if form.is_valid():
try:
count = Term.objects.upload(
request,
form.cleaned_data["glossary"],
lang,
request.FILES["file"],
form.cleaned_data["method"],
)
import_message(
request,
count,
_("No terms to import found in file."),
ngettext(
"Imported %d term from the uploaded file.",
"Imported %d terms from the uploaded file.",
count,
),
)
except Exception as error:
report_error(cause="Failed to handle upload")
messages.error(request, _("File upload has failed: %s") % error)
else:
messages.error(request, _("Failed to process form!"))
return redirect("show_glossary", project=prj.slug, lang=lang.code)
@never_cache
def download_glossary(request, project, lang):
"""Export glossary into various formats."""
prj = get_project(request, project)
lang = get_object_or_404(Language, code=lang)
# Parse parameters
export_format = None
if "format" in request.GET:
export_format = request.GET["format"]
if export_format not in EXPORT_TYPES:
export_format = "csv"
# Grab all terms
terms = (
Term.objects.for_project(prj)
.filter(language=lang)
.prefetch_related("glossary")
.order()
)
# Translate toolkit based export
exporter = EXPORTERS[export_format](
project=prj,
language=lang,
source_language=terms[0].glossary.source_language
if terms
else Language.objects.default_language,
url=get_site_url(
reverse("show_glossary", kwargs={"project": prj.slug, "lang": lang.code})
),
fieldnames=("source", "target"),
)
# Add terms
for term in terms:
exporter.add_glossary_term(term)
# Save to response
return exporter.get_response("glossary-{project}-{language}.{extension}")
@require_POST
@login_required
@session_ratelimit_post("glossary")
def add_glossary_term(request, unit_id):
unit = get_object_or_404(Unit, pk=int(unit_id))
request.user.check_access_component(unit.translation.component)
component = unit.translation.component
prj = component.project
lang = unit.translation.language
code = 403
results = ""
terms = []
if request.user.has_perm("glossary.add", prj):
form = TermForm(prj, request.POST)
if form.is_valid():
term = Term.objects.create(
request.user,
language=lang,
source=form.cleaned_data["source"],
target=form.cleaned_data["target"],
glossary=form.cleaned_data["glossary"],
)
terms = form.cleaned_data["terms"]
terms.append(term.id)
code = 200
results = render_to_string(
"snippets/glossary.html",
{
"glossary": (
Term.objects.get_terms(unit).order()
| Term.objects.for_project(prj, component.source_language)
.filter(pk__in=terms)
.order()
),
"unit": unit,
"user": request.user,
},
)
return JsonResponse(
data={
"responseCode": code,
"results": results,
"terms": ",".join(str(x) for x in terms),
}
)
@never_cache
@session_ratelimit_post("glossary")
def show_glossary(request, project, lang):
prj = get_project(request, project)
lang = get_object_or_404(Language, code=lang)
if request.method == "POST" and request.user.has_perm("glossary.add", prj):
form = TermForm(prj, request.POST)
if form.is_valid():
Term.objects.create(
request.user,
language=lang,
source=form.cleaned_data["source"],
target=form.cleaned_data["target"],
glossary=form.cleaned_data["glossary"],
)
return redirect_next(request.POST.get("next"), request.get_full_path())
form = TermForm(prj)
uploadform = GlossaryUploadForm(prj)
terms = Term.objects.for_project(prj).filter(language=lang).order()
letterform = LetterForm(request.GET)
searchform = OneTermForm(request.GET)
if searchform.is_valid() and searchform.cleaned_data["term"] != "":
terms = terms.filter(source__substring=searchform.cleaned_data["term"])
search = searchform.cleaned_data["term"]
else:
search = ""
if letterform.is_valid() and letterform.cleaned_data["letter"] != "":
terms = terms.filter(source__istartswith=letterform.cleaned_data["letter"])
letter = letterform.cleaned_data["letter"]
else:
letter = ""
terms = get_paginator(request, terms)
last_changes = (
Change.objects.last_changes(request.user)
.filter(project=prj, language=lang)
.exclude(glossary_term=None)[:10]
)
exporters = EXPORTERS.list_exporters_filter(EXPORT_TYPES)
return render(
request,
"glossary.html",
{
"title": dict_title(prj, lang),
"project": prj,
"language": lang,
"page_obj": terms,
"exporters": exporters,
"form": form,
"query_string": urlencode({"term": search, "letter": letter}),
"uploadform": uploadform,
"letterform": letterform,
"searchform": searchform,
"letter": letter,
"last_changes": last_changes,
"last_changes_url": urlencode(
(
("project", prj.slug),
("lang", lang.code),
("action", Change.ACTION_DICTIONARY_NEW),
("action", Change.ACTION_DICTIONARY_EDIT),
("action", Change.ACTION_DICTIONARY_UPLOAD),
)
),
},
)
|
from __future__ import division
import warnings
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from .._shared.helpers import *
from .._shared.params import default_params
from ..tools.analyze import analyze
from ..tools.cluster import cluster as clusterer
from ..tools.reduce import reduce as reducer
from ..tools.format_data import format_data
from .draw import _draw
from ..datageometry import DataGeometry
def plot(x, fmt='-', marker=None, markers=None, linestyle=None, linestyles=None,
color=None, colors=None, palette='hls', group=None, hue=None,
labels=None, legend=None, title=None, size=None, elev=10, azim=-60,
ndims=3, model=None, model_params=None, reduce='IncrementalPCA',
cluster=None, align=None, normalize=None, n_clusters=None,
save_path=None, animate=False, duration=30, tail_duration=2,
rotations=2, zoom=1, chemtrails=False, precog=False, bullettime=False,
frame_rate=50, explore=False, show=True, transform=None,
vectorizer='CountVectorizer', semantic='LatentDirichletAllocation',
corpus='wiki', ax=None):
"""
Plots dimensionality reduced data and parses plot arguments
Parameters
----------
x : Numpy array, DataFrame, String, Geo or mixed list
Data for the plot. The form should be samples (rows) by features (cols).
fmt : str or list of strings
A list of format strings. All matplotlib format strings are supported.
linestyle(s) : str or list of str
A list of line styles
marker(s) : str or list of str
A list of marker types
color(s) : str or list of str
        A list of colors
palette : str
A matplotlib or seaborn color palette
group : str/int/float or list
A list of group labels. Length must match the number of rows in your
dataset. If the data type is numerical, the values will be mapped to
rgb values in the specified palette. If the data type is strings,
the points will be labeled categorically. To label a subset of points,
use None (i.e. ['a', None, 'b','a']).
labels : list
A list of labels for each point. Must be dimensionality of data (x).
If no label is wanted for a particular point, input None.
legend : list or bool
If set to True, legend is implicitly computed from data. Passing a
list will add string labels to the legend (one for each list item).
title : str
A title for the plot
size : list
A list of [width, height] in inches to resize the figure
normalize : str or False
If set to 'across', the columns of the input data will be z-scored
across lists (default). If set to 'within', the columns will be
z-scored within each list that is passed. If set to 'row', each row of
the input data will be z-scored. If set to False, the input data will
be returned (default is False).
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
An `int` representing the number of dims to reduce the data x
to. If ndims > 3, will plot in 3 dimensions but return the higher
dimensional data. Default is None, which will plot data in 3
dimensions and return the data with the same number of dimensions
possibly normalized and/or aligned according to normalize/align
kwargs.
align : str or dict or False/None
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
cluster : str or dict or False/None
If cluster is passed, HyperTools will perform clustering using the
        specified clustering model. Supported algorithms are:
KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch,
FeatureAgglomeration, SpectralClustering and HDBSCAN (default: None).
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g.
        cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See
scikit-learn specific model docs for details on parameters supported for
each model. If no parameters are specified in the string a default set
of parameters will be used.
n_clusters : int
If n_clusters is passed, HyperTools will perform k-means clustering
with the k parameter set to n_clusters. The resulting clusters will
be plotted in different colors according to the color palette.
save_path : str
Path to save the image/movie. Must include the file extension in the
save path (i.e. save_path='/path/to/file/image.png'). NOTE: If saving
an animation, FFMPEG must be installed (this is a matplotlib req).
FFMPEG can be easily installed on a mac via homebrew brew install
ffmpeg or linux via apt-get apt-get install ffmpeg. If you don't
have homebrew (mac only), you can install it like this:
/usr/bin/ruby -e "$(curl -fsSL
https://raw.githubusercontent.com/Homebrew/install/master/install)".
animate : bool, 'parallel' or 'spin'
If True or 'parallel', plots the data as an animated trajectory, with
each dataset plotted simultaneously. If 'spin', all the data is plotted
at once but the camera spins around the plot (default: False).
duration (animation only) : float
Length of the animation in seconds (default: 30 seconds)
tail_duration (animation only) : float
Sets the length of the tail of the data (default: 2 seconds)
rotations (animation only) : float
Number of rotations around the box (default: 2)
zoom (animation only) : float
        How far to zoom into the plot; positive numbers zoom in (default: 1)
chemtrails (animation only) : bool
A low-opacity trail is left behind the trajectory (default: False).
precog (animation only) : bool
A low-opacity trail is plotted ahead of the trajectory (default: False).
bullettime (animation only) : bool
A low-opacity trail is plotted ahead and behind the trajectory
(default: False).
frame_rate (animation only) : int or float
Frame rate for animation (default: 50)
explore : bool
        If set to True, user-defined labels are displayed on hover. If no
        labels are passed, the point index and coordinates are shown.
        Note: explore mode is currently only supported for 3D static plots,
        and is an experimental feature (i.e. it may not yet work properly).
show : bool
If set to False, the figure will not be displayed, but the figure,
axis and data objects will still be returned (default: True).
transform : list of numpy arrays or None
The transformed data, bypasses transformations if this is set
(default : None).
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
ax : matplotlib.Axes
Axis handle to plot the figure
Returns
    -------
geo : hypertools.DataGeometry
A new data geometry object
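
    Examples
    --------
    A minimal usage sketch (assuming hypertools is imported as ``hyp`` and
    that ``data`` is a samples-by-features numpy array; only a few of the
    parameters described above are shown):

    >>> import numpy as np
    >>> import hypertools as hyp
    >>> data = np.random.randn(100, 10)
    >>> # reduce with PCA, colour three k-means clusters, skip the display
    >>> geo = hyp.plot(data, '.', reduce='PCA', n_clusters=3, show=False)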
"""
# warnings for deprecated API args
if (model is not None) or (model_params is not None):
        warnings.warn('Model and model_params arguments will be deprecated. '
                      'Please use the reduce keyword argument. See docs for details: '
                      'http://hypertools.readthedocs.io/en/latest/hypertools.plot.html#hypertools.plot')
reduce = {}
reduce['model'] = model
reduce['params'] = model_params
if group is not None:
warnings.warn('Group will be deprecated. Please use '
'hue keyword argument. See docs for details: ' 'http://hypertools.readthedocs.io/en/latest/hypertools.plot.html#hypertools.plot')
hue = group
if ax is not None:
if ndims>2:
if ax.name!='3d':
raise ValueError('If passing ax and the plot is 3D, ax must '
'also be 3d')
text_args = {
'vectorizer' : vectorizer,
'semantic' : semantic,
'corpus' : corpus
}
# analyze the data
if transform is None:
raw = format_data(x, **text_args)
xform = analyze(raw, ndims=ndims, normalize=normalize, reduce=reduce,
align=align, internal=True)
else:
xform = transform
# Return data that has been normalized and possibly reduced and/or aligned
xform_data = copy.copy(xform)
# catch all matplotlib kwargs here to pass on
mpl_kwargs = {}
# handle color (to be passed onto matplotlib)
if color is not None:
mpl_kwargs['color'] = color
if colors is not None:
mpl_kwargs['color'] = colors
        warnings.warn('Both color and colors defined: color will be ignored '
                      'in favor of colors.')
# handle linestyle (to be passed onto matplotlib)
if linestyle is not None:
mpl_kwargs['linestyle'] = linestyle
if linestyles is not None:
mpl_kwargs['linestyle'] = linestyles
        warnings.warn('Both linestyle and linestyles defined: linestyle '
                      'will be ignored in favor of linestyles.')
# handle marker (to be passed onto matplotlib)
if marker is not None:
mpl_kwargs['marker'] = marker
if markers is not None:
mpl_kwargs['marker'] = markers
        warnings.warn('Both marker and markers defined: marker will be '
                      'ignored in favor of markers.')
    # reduce the data for plotting: to ndims if fewer than 3 dims were requested, otherwise to 3
if (ndims and ndims < 3):
xform = reducer(xform, ndims=ndims, reduce=reduce, internal=True)
else:
xform = reducer(xform, ndims=3, reduce=reduce, internal=True)
# find cluster and reshape if n_clusters
if cluster is not None:
if hue is not None:
warnings.warn('cluster overrides hue, ignoring hue.')
if isinstance(cluster, (six.string_types, six.binary_type)):
model = cluster
params = default_params(model)
elif isinstance(cluster, dict):
model = cluster['model']
params = default_params(model, cluster['params'])
else:
raise ValueError('Invalid cluster model specified; should be'
' string or dictionary!')
if n_clusters is not None:
            if model in ('HDBSCAN',):
warnings.warn('n_clusters is not a valid parameter for '
'HDBSCAN clustering and will be ignored.')
else:
params['n_clusters'] = n_clusters
cluster_labels = clusterer(xform, cluster={'model': model,
'params': params})
xform, labels = reshape_data(xform, cluster_labels, labels)
hue = cluster_labels
elif n_clusters is not None:
# If cluster was None default to KMeans
cluster_labels = clusterer(xform, cluster='KMeans', n_clusters=n_clusters)
xform, labels = reshape_data(xform, cluster_labels, labels)
if hue is not None:
warnings.warn('n_clusters overrides hue, ignoring hue.')
# group data if there is a grouping var
elif hue is not None:
if color is not None:
warnings.warn("Using group, color keyword will be ignored.")
# if list of lists, unpack
if any(isinstance(el, list) for el in hue):
hue = list(itertools.chain(*hue))
# if all of the elements are numbers, map them to colors
if all(isinstance(el, int) or isinstance(el, float) for el in hue):
hue = vals2bins(hue)
elif all(isinstance(el, str) for el in hue):
hue = group_by_category(hue)
# reshape the data according to group
if n_clusters is None:
xform, labels = reshape_data(xform, hue, labels)
# interpolate lines if they are grouped
if is_line(fmt):
xform = patch_lines(xform)
# handle legend
if legend is not None:
if legend is False:
legend = None
elif legend is True and hue is not None:
legend = [item for item in sorted(set(hue), key=list(hue).index)]
elif legend is True and hue is None:
legend = [i + 1 for i in range(len(xform))]
mpl_kwargs['label'] = legend
    # interpolate if it's a line plot
if fmt is None or isinstance(fmt, six.string_types):
if is_line(fmt):
if xform[0].shape[0] > 1:
xform = interp_array_list(xform, interp_val=frame_rate*duration/(xform[0].shape[0] - 1))
elif type(fmt) is list:
for idx, xi in enumerate(xform):
if is_line(fmt[idx]):
if xi.shape[0] > 1:
xform[idx] = interp_array_list(xi, interp_val=frame_rate*duration/(xi.shape[0] - 1))
# handle explore flag
if explore:
        assert xform[0].shape[1] == 3, "Explore mode is currently only supported for 3D plots."
        mpl_kwargs['picker'] = True
# center
xform = center(xform)
# scale
xform = scale(xform)
# handle palette with seaborn
if isinstance(palette, np.bytes_):
palette = palette.decode("utf-8")
sns.set_palette(palette=palette, n_colors=len(xform))
sns.set_style(style='whitegrid')
# turn kwargs into a list
kwargs_list = parse_kwargs(xform, mpl_kwargs)
# handle format strings
if fmt is not None:
if type(fmt) is not list:
draw_fmt = [fmt for i in xform]
else:
draw_fmt = fmt
else:
draw_fmt = ['-']*len(x)
# convert all nans to zeros
for i, xi in enumerate(xform):
xform[i] = np.nan_to_num(xi)
# draw the plot
fig, ax, data, line_ani = _draw(xform, fmt=draw_fmt,
kwargs_list=kwargs_list,
labels=labels,
legend=legend,
title=title,
animate=animate,
duration=duration,
tail_duration=tail_duration,
rotations=rotations,
zoom=zoom,
chemtrails=chemtrails,
precog=precog,
bullettime=bullettime,
frame_rate=frame_rate,
elev=elev,
azim=azim,
explore=explore,
show=show,
size=size,
ax=ax)
# tighten layout
plt.tight_layout()
# save
if save_path is not None:
if animate:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=frame_rate, bitrate=1800)
line_ani.save(save_path, writer=writer)
else:
plt.savefig(save_path)
# show the plot
if show:
plt.show()
else:
# safely closes the plot so it doesn't pop up in another call to this function
plt.close('all')
# gather reduce params
if isinstance(reduce, dict):
reduce_dict = reduce
else:
reduce_dict = {
'model' : reduce,
'params' : {
'n_components' : ndims
},
}
# gather align params
if isinstance(align, dict):
align_dict = align
else:
align_dict = {
'model' : align,
'params' : {}
}
# gather all other kwargs
kwargs = {
'fmt' : fmt,
'marker': marker,
'markers' : markers,
'linestyle' : linestyle,
'linestyles' : linestyles,
'color' : color,
'colors' : colors,
'palette' : palette,
'hue' : hue,
'ndims' : ndims,
'labels' : labels,
'legend' : legend,
'title' : title,
'animate' : animate,
'duration' : duration,
'tail_duration' : tail_duration,
'rotations' : rotations,
'zoom' : zoom,
'chemtrails' : chemtrails,
'precog' : precog,
'bullettime' : bullettime,
'frame_rate' : frame_rate,
'elev' : elev,
'azim' : azim,
'explore' : explore,
'n_clusters' : n_clusters,
'size' : size
}
# turn lists into np arrays so that they don't turn into pickles when saved
for kwarg in kwargs:
if isinstance(kwargs[kwarg], list):
try:
                kwargs[kwarg] = np.array(kwargs[kwarg])
            except Exception:
warnings.warn('Could not convert all list arguments to numpy '
'arrays. If list is longer than 256 items, it '
'will automatically be pickled, which could '
'cause Python 2/3 compatibility issues for the '
'DataGeometry object.')
return DataGeometry(fig=fig, ax=ax, data=x, xform_data=xform_data,
line_ani=line_ani, reduce=reduce_dict, align=align_dict,
normalize=normalize, semantic=semantic,
vectorizer=vectorizer, corpus=corpus, kwargs=kwargs)
|
from os import path
import re
import pytest
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.smtp import DOMAIN
from homeassistant.components.smtp.notify import MailNotificationService
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
class MockSMTP(MailNotificationService):
"""Test SMTP object that doesn't need a working server."""
def _send_email(self, msg):
"""Just return string for testing."""
return msg.as_string()
async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
with patch(
"homeassistant.components.smtp.notify.MailNotificationService.connection_is_valid"
):
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{
"name": DOMAIN,
"platform": DOMAIN,
"recipient": "[email protected]",
"sender": "[email protected]",
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, DOMAIN)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"smtp/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path), patch(
"homeassistant.components.smtp.notify.MailNotificationService.connection_is_valid"
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
assert hass.services.has_service(notify.DOMAIN, "smtp_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
@pytest.fixture
def message():
"""Return MockSMTP object with test data."""
mailer = MockSMTP(
"localhost",
25,
5,
"[email protected]",
1,
"testuser",
"testpass",
["[email protected]", "[email protected]"],
"Home Assistant",
0,
)
yield mailer
HTML = """
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><meta charset="UTF-8"></head>
<body>
<div>
<h1>Intruder alert at apartment!!</h1>
</div>
<div>
<img alt="tests/testing_config/notify/test.jpg" src="cid:tests/testing_config/notify/test.jpg"/>
</div>
</body>
</html>"""
EMAIL_DATA = [
(
"Test msg",
{"images": ["tests/testing_config/notify/test.jpg"]},
"Content-Type: multipart/related",
),
(
"Test msg",
{"html": HTML, "images": ["tests/testing_config/notify/test.jpg"]},
"Content-Type: multipart/related",
),
(
"Test msg",
{"html": HTML, "images": ["test.jpg"]},
"Content-Type: multipart/related",
),
(
"Test msg",
{"html": HTML, "images": ["tests/testing_config/notify/test.pdf"]},
"Content-Type: multipart/related",
),
]
@pytest.mark.parametrize(
"message_data, data, content_type",
EMAIL_DATA,
ids=[
"Tests when sending text message and images.",
"Tests when sending text message, HTML Template and images.",
"Tests when image does not exist at mentioned location.",
"Tests when image type cannot be detected or is of wrong type.",
],
)
def test_send_message(message_data, data, content_type, hass, message):
"""Verify if we can send messages of all types correctly."""
sample_email = "<mock@mock>"
with patch("email.utils.make_msgid", return_value=sample_email):
result = message.send_message(message_data, data=data)
assert content_type in result
def test_send_text_message(hass, message):
"""Verify if we can send simple text message."""
expected = (
'^Content-Type: text/plain; charset="us-ascii"\n'
"MIME-Version: 1.0\n"
"Content-Transfer-Encoding: 7bit\n"
"Subject: Home Assistant\n"
"To: [email protected],[email protected]\n"
"From: Home Assistant <[email protected]>\n"
"X-Mailer: Home Assistant\n"
"Date: [^\n]+\n"
"Message-Id: <[^@]+@[^>]+>\n"
"\n"
"Test msg$"
)
sample_email = "<mock@mock>"
message_data = "Test msg"
with patch("email.utils.make_msgid", return_value=sample_email):
result = message.send_message(message_data)
assert re.search(expected, result)
|
import collections
from typing import MutableSequence, cast
import attr
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from qutebrowser.config import config
from qutebrowser.mainwindow import mainwindow
instance = cast('WindowUndoManager', None)
@attr.s
class _WindowUndoEntry:
"""Information needed for :undo -w."""
geometry = attr.ib()
tab_stack = attr.ib()
class WindowUndoManager(QObject):
"""Manager which saves/restores windows."""
def __init__(self, parent=None):
super().__init__(parent)
self._undos: MutableSequence[_WindowUndoEntry] = collections.deque()
QApplication.instance().window_closing.connect(self._on_window_closing)
config.instance.changed.connect(self._on_config_changed)
@config.change_filter('tabs.undo_stack_size')
def _on_config_changed(self):
self._update_undo_stack_size()
def _on_window_closing(self, window):
if window.tabbed_browser.is_private:
return
self._undos.append(_WindowUndoEntry(
geometry=window.saveGeometry(),
tab_stack=window.tabbed_browser.undo_stack,
))
def _update_undo_stack_size(self):
newsize = config.instance.get('tabs.undo_stack_size')
if newsize < 0:
newsize = None
self._undos = collections.deque(self._undos, maxlen=newsize)
def undo_last_window_close(self):
"""Restore the last window to be closed.
It will have the same tab and undo stack as when it was closed.
"""
entry = self._undos.pop()
window = mainwindow.MainWindow(
private=False,
geometry=entry.geometry,
)
window.show()
window.tabbed_browser.undo_stack = entry.tab_stack
window.tabbed_browser.undo()
def init():
global instance
instance = WindowUndoManager(parent=QApplication.instance())
|
from datetime import timedelta
import logging
from pyombi import OmbiError
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ombi sensor platform."""
if discovery_info is None:
return
sensors = []
ombi = hass.data[DOMAIN]["instance"]
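    # SENSOR_TYPES (from const.py) is assumed to map a label such as "movies"
    # to a dict with "type" and "icon" keys, which is how it is unpacked below.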
for sensor in SENSOR_TYPES:
sensor_label = sensor
sensor_type = SENSOR_TYPES[sensor]["type"]
sensor_icon = SENSOR_TYPES[sensor]["icon"]
sensors.append(OmbiSensor(sensor_label, sensor_type, ombi, sensor_icon))
add_entities(sensors, True)
class OmbiSensor(Entity):
"""Representation of an Ombi sensor."""
def __init__(self, label, sensor_type, ombi, icon):
"""Initialize the sensor."""
self._state = None
self._label = label
self._type = sensor_type
self._ombi = ombi
self._icon = icon
@property
def name(self):
"""Return the name of the sensor."""
return f"Ombi {self._type}"
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Update the sensor."""
try:
if self._label == "movies":
self._state = self._ombi.movie_requests
elif self._label == "tv":
self._state = self._ombi.tv_requests
elif self._label == "music":
self._state = self._ombi.music_requests
elif self._label == "pending":
self._state = self._ombi.total_requests["pending"]
elif self._label == "approved":
self._state = self._ombi.total_requests["approved"]
elif self._label == "available":
self._state = self._ombi.total_requests["available"]
except OmbiError as err:
_LOGGER.warning("Unable to update Ombi sensor: %s", err)
self._state = None
|
import re
from openrazer_daemon.hardware.device_base import RazerDevice as __RazerDevice, RazerDeviceBrightnessSuspend as __RazerDeviceBrightnessSuspend
from openrazer_daemon.dbus_services.dbus_methods import kraken as _dbus_kraken, chroma_keyboard as _dbus_chroma
class RazerKraken71(__RazerDevice):
"""
Class for the Razer Kraken 7.1
"""
EVENT_FILE_REGEX = re.compile(r'.*Razer_Kraken_7\.1_000000000000-event-if03')
USB_VID = 0x1532
USB_PID = 0x0501
METHODS = ['get_device_type_headset',
'set_static_effect', 'set_none_effect']
DEVICE_IMAGE = "https://assets.razerzone.com/eeimages/support/products/229/229_kraken_71.png"
@staticmethod
def decode_bitfield(bitfield):
return {
'state': (bitfield & 0x01) == 0x01,
'breathing1': (bitfield & 0x02) == 0x02,
'spectrum': (bitfield & 0x04) == 0x04,
'sync': (bitfield & 0x08) == 0x08,
'breathing2': (bitfield & 0x10) == 0x10,
'breathing3': (bitfield & 0x20) == 0x20,
}
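        # Illustrative example (hypothetical value): decode_bitfield(0x05)
        # reports state=True and spectrum=True, since bits 0 and 2 are set,
        # while every other flag is False.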
def _suspend_device(self):
"""
Suspend the device
        Store the current effect for later, then turn the lighting off
"""
self.suspend_args.clear()
self.suspend_args['effect'] = self.zone["backlight"]["effect"]
self.disable_notify = True
_dbus_chroma.set_none_effect(self)
self.disable_notify = False
def _resume_device(self):
"""
Resume the device
        Restore the effect that was active before the device was suspended
"""
effect = self.suspend_args.get('effect', '')
self.disable_notify = True
if effect == 'static': # Static on classic is only 1 colour
_dbus_chroma.set_static_effect(self, 0x00, 0x00, 0x00)
self.disable_notify = False
class RazerKraken71Alternate(RazerKraken71):
"""
Class for the Razer Kraken 7.1 (Alternate)
"""
USB_PID = 0x0506
class RazerKraken71Chroma(__RazerDevice):
"""
Class for the Razer Kraken 7.1 Chroma
"""
EVENT_FILE_REGEX = re.compile(r'.*Razer_Kraken_7\.1_Chroma-event-if03')
USB_VID = 0x1532
USB_PID = 0x0504
METHODS = ['get_device_type_headset',
'set_static_effect', 'set_spectrum_effect', 'set_none_effect', 'set_breath_single_effect',
'set_custom_kraken']
DEVICE_IMAGE = "https://assets.razerzone.com/eeimages/support/products/280/280_kraken_71_chroma.png"
@staticmethod
def decode_bitfield(bitfield):
return {
'state': (bitfield & 0x01) == 0x01,
'breathing1': (bitfield & 0x02) == 0x02,
'spectrum': (bitfield & 0x04) == 0x04,
'sync': (bitfield & 0x08) == 0x08,
'breathing2': (bitfield & 0x10) == 0x10,
'breathing3': (bitfield & 0x20) == 0x20,
}
def _suspend_device(self):
"""
Suspend the device
        Store the current effect and colour for later, then turn the lighting off
"""
self.suspend_args.clear()
self.suspend_args['effect'] = self.zone["backlight"]["effect"]
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:3]
self.disable_notify = True
_dbus_chroma.set_none_effect(self)
self.disable_notify = False
def _resume_device(self):
"""
Resume the device
        Restore the effect (and colour) that was active before the device was suspended
"""
effect = self.suspend_args.get('effect', '')
args = self.suspend_args.get('args', [])
self.disable_notify = True
if effect == 'spectrum':
_dbus_chroma.set_spectrum_effect(self)
elif effect == 'static':
_dbus_chroma.set_static_effect(self, *args)
elif effect == 'breathSingle':
_dbus_chroma.set_breath_single_effect(self, *args)
self.disable_notify = False
class RazerKraken71V2(__RazerDevice):
"""
Class for the Razer Kraken 7.1 V2
"""
EVENT_FILE_REGEX = re.compile(r'.*Razer_Kraken_7\.1_V2_0+-event-if03')
USB_VID = 0x1532
USB_PID = 0x0510
METHODS = ['get_device_type_headset',
'set_static_effect', 'set_spectrum_effect', 'set_none_effect', 'set_breath_single_effect', 'set_breath_dual_effect', 'set_breath_triple_effect',
'set_custom_kraken']
DEVICE_IMAGE = "https://assets.razerzone.com/eeimages/support/products/729/729_kraken_71_v2.png"
@staticmethod
def decode_bitfield(bitfield):
return {
'state': (bitfield & 0x01) == 0x01,
'breathing1': (bitfield & 0x02) == 0x02,
'spectrum': (bitfield & 0x04) == 0x04,
'sync': (bitfield & 0x08) == 0x08,
'breathing2': (bitfield & 0x10) == 0x10,
'breathing3': (bitfield & 0x20) == 0x20,
}
def _suspend_device(self):
"""
Suspend the device
        Store the current effect and colours for later, then turn the lighting off
"""
self.suspend_args.clear()
self.suspend_args['effect'] = self.zone["backlight"]["effect"]
if self.suspend_args['effect'] == "breathDual":
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:6]
elif self.suspend_args['effect'] == "breathTriple":
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:9]
else:
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:3]
self.disable_notify = True
_dbus_chroma.set_none_effect(self)
self.disable_notify = False
def _resume_device(self):
"""
Resume the device
        Restore the effect (and colours) that was active before the device was suspended
"""
effect = self.suspend_args.get('effect', '')
args = self.suspend_args.get('args', [])
self.disable_notify = True
if effect == 'spectrum':
_dbus_chroma.set_spectrum_effect(self)
elif effect == 'static':
_dbus_chroma.set_static_effect(self, *args)
elif effect == 'breathSingle':
_dbus_chroma.set_breath_single_effect(self, *args)
elif effect == 'breathDual':
_dbus_chroma.set_breath_dual_effect(self, *args)
elif effect == 'breathTriple':
_dbus_chroma.set_breath_triple_effect(self, *args)
self.disable_notify = False
class RazerKrakenUltimate(__RazerDevice):
"""
Class for the Razer Kraken Ultimate
"""
EVENT_FILE_REGEX = re.compile(r'.*Razer_Kraken_Ultimate_0+-event-if03')
USB_VID = 0x1532
USB_PID = 0x0527
METHODS = ['get_device_type_headset',
'set_static_effect', 'set_spectrum_effect', 'set_none_effect', 'set_breath_single_effect',
'set_breath_dual_effect', 'set_breath_triple_effect',
'get_static_effect_args_kraken', 'set_custom_kraken']
DEVICE_IMAGE = "https://assets.razerzone.com/eeimages/support/products/1603/rzr_kraken_ultimate_render01_2019_resized.png"
@staticmethod
def decode_bitfield(bitfield):
return {
'state': (bitfield & 0x01) == 0x01,
'breathing1': (bitfield & 0x02) == 0x02,
'spectrum': (bitfield & 0x04) == 0x04,
'sync': (bitfield & 0x08) == 0x08,
'breathing2': (bitfield & 0x10) == 0x10,
'breathing3': (bitfield & 0x20) == 0x20,
}
def _suspend_device(self):
"""
Suspend the device
        Store the current effect and colours for later, then turn the lighting off
"""
self.suspend_args.clear()
self.suspend_args['effect'] = self.zone["backlight"]["effect"]
if self.suspend_args['effect'] == "breathDual":
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:6]
elif self.suspend_args['effect'] == "breathTriple":
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:9]
else:
self.suspend_args['args'] = self.zone["backlight"]["colors"][0:3]
self.disable_notify = True
_dbus_chroma.set_none_effect(self)
self.disable_notify = False
def _resume_device(self):
"""
Resume the device
        Restore the effect (and colours) that was active before the device was suspended
"""
effect = self.suspend_args.get('effect', '')
args = self.suspend_args.get('args', [])
self.disable_notify = True
if effect == 'spectrum':
_dbus_chroma.set_spectrum_effect(self)
elif effect == 'static':
_dbus_chroma.set_static_effect(self, *args)
elif effect == 'breathSingle':
_dbus_chroma.set_breath_single_effect(self, *args)
elif effect == 'breathDual':
_dbus_chroma.set_breath_dual_effect(self, *args)
elif effect == 'breathTriple':
_dbus_chroma.set_breath_triple_effect(self, *args)
self.disable_notify = False
class RazerKrakenKittyEdition(__RazerDeviceBrightnessSuspend):
"""
Class for the Razer Kraken Kitty Edition
"""
EVENT_FILE_REGEX = re.compile(r'.*Razer_Kraken_Kitty_Chroma_Control-event-if00')
USB_VID = 0x1532
USB_PID = 0x0F19
METHODS = ['get_device_type_headset',
'set_none_effect', 'set_static_effect', 'set_breath_random_effect', 'set_breath_single_effect',
'set_breath_dual_effect',
'set_custom_effect', 'set_key_row',
'set_brightness', 'get_brightness']
HAS_MATRIX = True
MATRIX_DIMS = [1, 4]
DEVICE_IMAGE = "https://hybrismediaprod.blob.core.windows.net/sys-master-phoenix-images-container/hda/h57/9055450103838/Kraken-Kitty-Edition-Black-gallery-Hero.jpg "
|
import credstash
import pytest
import botocore.exceptions
@pytest.yield_fixture
def secret():
secret = {
'name': 'test',
'version': '0000000000000000000',
'value': 'secret'
}
credstash.putSecret(secret['name'], secret['value'])
try:
yield secret
finally:
credstash.deleteSecrets("test")
def test_listSecrets(secret):
secrets = credstash.listSecrets()
del secret['value']
assert secrets == [secret]
def test_getSecret(secret):
s = credstash.getSecret(secret['name'])
assert s == secret['value']
def test_getSecret_wrong_region(secret):
    try:
        credstash.getSecret(secret['name'], region='us-west-2')
    except botocore.exceptions.ClientError as e:
        assert e.response['Error']['Code'] == 'ResourceNotFoundException', \
            "expected botocore ResourceNotFoundException"
    else:
        assert False, "expected botocore ResourceNotFoundException"
def test_getSecret_nonexistent():
try:
credstash.getSecret("bad secret")
except credstash.ItemNotFound:
assert True
else:
assert False, "expected credstash.ItemNotFound error"
def test_getAllSecrets(secret):
s = credstash.getAllSecrets()
assert s == {secret['name']:secret['value']}
def test_getAllSecrets_no_secrets():
s = credstash.getAllSecrets()
assert s == dict()
def test_deleteSecret(secret):
secrets = credstash.listSecrets()
del secret['value']
assert secrets == [secret]
credstash.deleteSecrets(secret['name'])
secrets = credstash.listSecrets()
assert secrets == []
|
from __future__ import unicode_literals
from base64 import b16encode
def b16_encode(item):
"""base16 encode"""
try:
return (b16encode(item.encode('utf-8'))).decode()
    except Exception:
return ''
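# Example: b16_encode('hi') returns '6869', since base16 encodes each UTF-8
# byte as two hexadecimal digits; non-string input falls back to ''.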
|
import os
import io
import sys
import zipfile
import requests
DEPLOYER_ACCESS_TOKEN = os.getenv('DEPLOYER_ACCESS_TOKEN_SITE')
SITE_DIR = os.path.dirname(os.path.abspath(__file__))
def deploy():
imagename = 'flexxdemo:1'
# Zip it up
f = io.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
for name in os.listdir(SITE_DIR):
fullname = os.path.join(SITE_DIR, name)
if os.path.isfile(fullname):
bb = open(fullname, 'rb').read()
zf.writestr(os.path.relpath(fullname, SITE_DIR), bb)
# POST
url = 'https://deploy.canpute.com/{}/{}'.format(imagename, DEPLOYER_ACCESS_TOKEN)
r = requests.post(url, data=f.getvalue())
if r.status_code != 200:
raise RuntimeError('Publish failed: ' + r.text)
else:
print('Publish succeeded, ' + r.text)
if __name__ == '__main__':
deploy()
|
import argparse
import numpy as np
import os
import chainer
from chainer.links.caffe.caffe_function import CaffeFunction
from chainercv.links import SEResNet101
from chainercv.links import SEResNet152
from chainercv.links import SEResNet50
from chainercv.links import SEResNeXt101
from chainercv.links import SEResNeXt50
def _transfer_components(src, dst_conv, dst_bn, bname, cname):
src_conv = getattr(src, 'conv{}_{}'.format(bname, cname))
src_bn = getattr(src, 'conv{}_{}/bn'.format(bname, cname))
src_scale = getattr(src, 'conv{}_{}/bn/scale'.format(bname, cname))
if dst_conv.groups == 1:
dst_conv.W.data[:] = src_conv.W.data
else:
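        # For grouped convolutions the source kernel is indexed as a dense
        # (out_channels, in_channels) array; only its block-diagonal entries
        # are used, so each group's diagonal block is copied separately.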
group_size = src_conv.W.data.shape[1] // dst_conv.groups
for group in range(dst_conv.groups):
from_idx = group_size * group
to_idx = group_size * (group + 1)
dst_conv.W.data[from_idx: to_idx, :, :, :] = \
src_conv.W.data[from_idx: to_idx, from_idx: to_idx, :, :]
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.data[:] = src_scale.W.data
dst_bn.beta.data[:] = src_scale.bias.b.data
def _transfer_se_components(src, dst_se, bname, cname):
src_se_down = getattr(src, 'conv{}_{}_down'.format(bname, cname))
src_se_up = getattr(src, 'conv{}_{}_up'.format(bname, cname))
hidden_size, in_size = dst_se.down.W.shape
dst_se.down.W.data[:] = src_se_down.W.data.reshape((hidden_size, in_size))
dst_se.down.b.data[:] = src_se_down.b.data
dst_se.up.W.data[:] = src_se_up.W.data.reshape((in_size, hidden_size))
dst_se.up.b.data[:] = src_se_up.b.data
def _transfer_bottleneckA(src, dst, name):
_transfer_components(
src, dst.conv1.conv, dst.conv1.bn, name, '1x1_reduce')
_transfer_components(
src, dst.conv2.conv, dst.conv2.bn, name, '3x3')
_transfer_components(
src, dst.conv3.conv, dst.conv3.bn, name, '1x1_increase')
_transfer_components(
src, dst.residual_conv.conv, dst.residual_conv.bn, name, '1x1_proj')
_transfer_se_components(src, dst.se, name, '1x1')
def _transfer_bottleneckB(src, dst, name):
_transfer_components(
src, dst.conv1.conv, dst.conv1.bn, name, '1x1_reduce')
_transfer_components(
src, dst.conv2.conv, dst.conv2.bn, name, '3x3')
_transfer_components(
src, dst.conv3.conv, dst.conv3.bn, name, '1x1_increase')
_transfer_se_components(src, dst.se, name, '1x1')
def _transfer_block(src, dst, names):
_transfer_bottleneckA(src, dst.a, names[0])
for i, name in enumerate(names[1:]):
dst_bottleneckB = getattr(dst, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
def _transfer_resnet50(src, dst, class_indices):
# Reorder weights to work on RGB and not on BGR
dst.conv1.conv.W.data[:] = src['conv1/7x7_s2'].W.data[:, ::-1]
# No bias setting for conv1, which is different from ResNet50.
dst.conv1.bn.avg_mean[:] = src['conv1/7x7_s2/bn'].avg_mean
dst.conv1.bn.avg_var[:] = src['conv1/7x7_s2/bn'].avg_var
dst.conv1.bn.gamma.data[:] = src['conv1/7x7_s2/bn/scale'].W.data
dst.conv1.bn.beta.data[:] = src['conv1/7x7_s2/bn/scale'].bias.b.data
_transfer_block(src, dst.res2, ['2_1', '2_2', '2_3'])
_transfer_block(src, dst.res3, ['3_1', '3_2', '3_3', '3_4'])
_transfer_block(src, dst.res4, ['4_1', '4_2', '4_3', '4_4', '4_5', '4_6'])
_transfer_block(src, dst.res5, ['5_1', '5_2', '5_3'])
dst.fc6.W.data[:] = src.classifier.W.data[class_indices, :]
dst.fc6.b.data[:] = src.classifier.b.data[class_indices]
def _transfer_resnet101(src, dst, class_indices):
# Reorder weights to work on RGB and not on BGR
dst.conv1.conv.W.data[:] = src['conv1/7x7_s2'].W.data[:, ::-1]
dst.conv1.bn.avg_mean[:] = src['conv1/7x7_s2/bn'].avg_mean
dst.conv1.bn.avg_var[:] = src['conv1/7x7_s2/bn'].avg_var
dst.conv1.bn.gamma.data[:] = src['conv1/7x7_s2/bn/scale'].W.data
dst.conv1.bn.beta.data[:] = src['conv1/7x7_s2/bn/scale'].bias.b.data
_transfer_block(src, dst.res2, ['2_{}'.format(i) for i in range(1, 4)])
_transfer_block(src, dst.res3, ['3_{}'.format(i) for i in range(1, 5)])
_transfer_block(src, dst.res4, ['4_{}'.format(i) for i in range(1, 24)])
_transfer_block(src, dst.res5, ['5_{}'.format(i) for i in range(1, 4)])
dst.fc6.W.data[:] = src.classifier.W.data[class_indices, :]
dst.fc6.b.data[:] = src.classifier.b.data[class_indices]
def _transfer_resnet152(src, dst, class_indices):
# Reorder weights to work on RGB and not on BGR
dst.conv1.conv.W.data[:] = src['conv1/7x7_s2'].W.data[:, ::-1]
dst.conv1.bn.avg_mean[:] = src['conv1/7x7_s2/bn'].avg_mean
dst.conv1.bn.avg_var[:] = src['conv1/7x7_s2/bn'].avg_var
dst.conv1.bn.gamma.data[:] = src['conv1/7x7_s2/bn/scale'].W.data
dst.conv1.bn.beta.data[:] = src['conv1/7x7_s2/bn/scale'].bias.b.data
_transfer_block(src, dst.res2, ['2_{}'.format(i) for i in range(1, 4)])
_transfer_block(src, dst.res3, ['3_{}'.format(i) for i in range(1, 9)])
_transfer_block(src, dst.res4, ['4_{}'.format(i) for i in range(1, 37)])
_transfer_block(src, dst.res5, ['5_{}'.format(i) for i in range(1, 4)])
dst.fc6.W.data[:] = src.classifier.W.data[class_indices, :]
dst.fc6.b.data[:] = src.classifier.b.data[class_indices]
def _load_class_indices():
# The caffemodel weights in the original repository
# (https://github.com/hujie-frank/SENet) have been trained with a modified
# order of class indices.
indices = np.zeros(1000, dtype=np.int32)
file = os.path.join(os.path.dirname(__file__), 'label_map.csv')
with open(file, 'r') as fr:
lines = fr.readlines()
for line in lines:
index_modified, index_origin = map(int, line.strip().split(','))
indices[index_origin] = index_modified
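        # e.g. a (hypothetical) line "12,34" means output unit 12 of the
        # pretrained network corresponds to standard ImageNet class 34, so
        # indices[34] == 12 and the fc weights are reordered accordingly.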
return indices
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'model_name', choices=(
'se-resnet50', 'se-resnet101', 'se-resnet152',
'se-resnext50', 'se-resnext101',
))
parser.add_argument('caffemodel')
parser.add_argument('output', nargs='?', default=None)
args = parser.parse_args()
caffemodel = CaffeFunction(args.caffemodel)
if args.model_name == 'se-resnet50':
model = SEResNet50(pretrained_model=None, n_class=1000)
model(np.zeros((1, 3, 224, 224), dtype=np.float32))
_transfer_resnet50(caffemodel, model, _load_class_indices())
elif args.model_name == 'se-resnet101':
model = SEResNet101(pretrained_model=None, n_class=1000)
model(np.zeros((1, 3, 224, 224), dtype=np.float32))
_transfer_resnet101(caffemodel, model, _load_class_indices())
elif args.model_name == 'se-resnet152':
model = SEResNet152(pretrained_model=None, n_class=1000)
model(np.zeros((1, 3, 224, 224), dtype=np.float32))
_transfer_resnet152(caffemodel, model, _load_class_indices())
elif args.model_name == 'se-resnext50':
model = SEResNeXt50(pretrained_model=None, n_class=1000)
model(np.zeros((1, 3, 224, 224), dtype=np.float32))
_transfer_resnet50(caffemodel, model, _load_class_indices())
elif args.model_name == 'se-resnext101':
model = SEResNeXt101(pretrained_model=None, n_class=1000)
model(np.zeros((1, 3, 224, 224), dtype=np.float32))
_transfer_resnet101(caffemodel, model, _load_class_indices())
if args.output is None:
output = '{}_imagenet_convert.npz'.format(args.model_name)
else:
output = args.output
chainer.serializers.save_npz(output, model)
if __name__ == '__main__':
main()
|
import unittest
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import smb_service
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class _FakeSmbService(smb_service.BaseSmbService):
CLOUD = 'mock'
def __init__(self, disk_spec, zone):
super(_FakeSmbService, self).__init__(disk_spec, zone)
self.is_ready_called = False
def _IsReady(self):
return True
def GetRemoteAddress(self):
return '//remote1'
def GetStorageAccountAndKey(self):
return {'user': 'hello', 'pw': 'world'}
def _Create(self):
pass
def _Delete(self):
pass
class _FakeSmbServiceWithDefaultSmbVersion(_FakeSmbService):
CLOUD = 'mock2'
DEFAULT_SMB_VERSION = '3.0'
class SmbServiceTest(pkb_common_test_case.PkbCommonTestCase):
def _SetFlags(self):
FLAGS['default_timeout'].parse(10)
def _NewSmbResource(self):
return _FakeSmbService(disk.BaseDiskSpec('test_component'), 'us-west1-a')
def testNewSmbResource(self):
smb = self._NewSmbResource()
self.assertIsNone(smb.DEFAULT_SMB_VERSION)
def testRegistry(self):
smb_class = smb_service.GetSmbServiceClass(_FakeSmbService.CLOUD)
self.assertEqual(_FakeSmbService, smb_class)
def testCreateSmbDisk(self):
smb = self._NewSmbResource()
smb_disk = smb.CreateSmbDisk()
self.assertEqual('//remote1', smb_disk.device_path)
self.assertEqual({'user': 'hello', 'pw': 'world'},
smb_disk.storage_account_and_key)
self.assertEqual('3.0', smb_disk.smb_version)
def testDefaultSmbVersion(self):
self._SetFlags()
smb = _FakeSmbServiceWithDefaultSmbVersion(
disk.BaseDiskSpec('test_component'), 'us-west1-a')
smb_disk = smb.CreateSmbDisk()
self.assertEqual('3.0', smb_disk.smb_version)
if __name__ == '__main__':
unittest.main()
|
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Alert states."""
hass.states.async_set("alert.entity_off", "off", {})
hass.states.async_set("alert.entity_on", "on", {})
turn_on_calls = async_mock_service(hass, "alert", "turn_on")
turn_off_calls = async_mock_service(hass, "alert", "turn_off")
    # These calls should do nothing as the entities are already in the desired state
await hass.helpers.state.async_reproduce_state(
[State("alert.entity_off", "off"), State("alert.entity_on", "on")]
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("alert.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("alert.entity_on", "off"),
State("alert.entity_off", "on"),
# Should not raise
State("alert.non_existing", "on"),
]
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "alert"
assert turn_on_calls[0].data == {
"entity_id": "alert.entity_off",
}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "alert"
assert turn_off_calls[0].data == {"entity_id": "alert.entity_on"}
|
import pytest
import numpy as np
import functools
from tensornetwork.backends.abstract_backend import AbstractBackend
from tensornetwork.backends import backend_factory
from tensornetwork import backends
import tensornetwork
def jittest_init(backend):
"""
Helper to initialize data for the other Jit tests.
"""
backend_obj = backends.backend_factory.get_backend(backend)
def fun(x, A, y):
return backend_obj.multiply(x, backend_obj.multiply(A, y))
x = backend_obj.randn((4,), seed=11)
y = backend_obj.randn((4,), seed=11)
A = backend_obj.randn((4, 4), seed=11)
return (x, y, A, fun)
def test_jit(backend):
"""
Tests that tn.jit gives the right answer.
"""
x, y, A, fun = jittest_init(backend)
fun_jit = tensornetwork.jit(fun, backend=backend)
res1 = fun(x, A, y)
res2 = fun_jit(x, A, y)
np.testing.assert_allclose(res1, res2)
def test_jit_ampersand(backend):
"""
Tests that tn.jit gives the right answer when used as a decorator.
"""
x, y, A, fun = jittest_init(backend)
@functools.partial(tensornetwork.jit, static_argnums=(3,), backend=backend)
def fun_jit(x, A, y, dummy):
_ = dummy
return fun(x, A, y)
res1 = fun(x, A, y)
res2 = fun_jit(x, A, y, 2)
np.testing.assert_allclose(res1, res2)
def test_jit_args(backend):
"""
Tests that tn.jit gives the right answer when given extra arguments.
"""
x, y, A, fun = jittest_init(backend)
fun_jit = tensornetwork.jit(fun, backend=backend)
res1 = fun(x, A, y)
res2 = fun_jit(x, A, y)
res3 = fun_jit(x, y=y, A=A)
np.testing.assert_allclose(res1, res2)
np.testing.assert_allclose(res1, res3)
def test_jit_backend_argnum_is_string(backend):
"""
Tests that tn.jit gives the right answer when the backend is supplied
via backend_argnum as a string.
"""
x, y, A, fun = jittest_init(backend)
@functools.partial(tensornetwork.jit, backend_argnum=3)
def fun_jit(x, A, y, the_backend):
_ = the_backend
return fun(x, A, y)
res1 = fun(x, A, y)
res2 = fun_jit(x, A, y, backend)
np.testing.assert_allclose(res1, res2)
def test_jit_backend_argnum_is_obj(backend):
"""
Tests that tn.jit gives the right answer when the backend is supplied
via backend_argnum as a backend object.
"""
x, y, A, fun = jittest_init(backend)
@functools.partial(tensornetwork.jit, backend_argnum=3)
def fun_jit(x, A, y, the_backend):
_ = the_backend
return fun(x, A, y)
res1 = fun(x, A, y)
backend_obj = backends.backend_factory.get_backend(backend)
res2 = fun_jit(x, A, y, backend_obj)
np.testing.assert_allclose(res1, res2)
def test_jit_backend_argnum_invalid(backend):
"""
Tests that tn.jit raises ValueError when backend_argnum points to something
other than a backend.
"""
x, y, A, fun = jittest_init(backend)
with pytest.raises(ValueError):
@functools.partial(tensornetwork.jit, backend_argnum=3)
def fun_jit(x, A, y, the_backend):
_ = the_backend
return fun(x, A, y)
_ = fun_jit(x, A, y, 99)
def test_jit_backend_and_backend_obj_raises_error(backend):
"""
Tests that tn.jit raises ValueError when backend_argnum and backend
are both specified.
"""
x, y, A, fun = jittest_init(backend)
with pytest.raises(ValueError):
@functools.partial(tensornetwork.jit, backend_argnum=3, backend=backend)
def fun_jit(x, A, y, the_backend):
_ = the_backend
return fun(x, A, y)
_ = fun_jit(x, A, y, backend)
|
import os
import json
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
try:
from docker import Client
except ImportError:
Client = None
from docker_collector import DockerCollector
dirname = os.path.dirname(__file__)
fixtures_path = os.path.join(dirname, 'fixtures/')
def run_only_if_docker_client_is_available(func):
try:
from docker import Client
except ImportError:
Client = None
pred = lambda: Client is not None
return run_only(func, pred)
class TestDockerCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DockerCollector', {
'interval': 10,
})
self.collector = DockerCollector(config, None)
def test_import(self):
self.assertTrue(DockerCollector)
def test_docker_stats_method_exists(self):
self.assertTrue("stats" in dir(Client))
def test_docker_stats_output_parse(self):
f = open(os.path.join(fixtures_path, "example.stat")).read()
stat = json.loads(f)
for path in self.collector.METRICS:
val = self.collector.get_value(path, stat)
self.assertTrue(val is not None)
def test_docker_stats_output_parse_fail(self):
f = open(os.path.join(fixtures_path, "example_empty.stat")).read()
stat = json.loads(f)
for path in self.collector.METRICS:
val = self.collector.get_value(path, stat)
self.assertTrue(val is None)
if __name__ == "__main__":
unittest.main()
|
import datetime
import logging
from concord232 import client as concord232_client
import requests
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
CONF_CODE,
CONF_HOST,
CONF_MODE,
CONF_NAME,
CONF_PORT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "CONCORD232"
DEFAULT_PORT = 5007
DEFAULT_MODE = "audible"
SCAN_INTERVAL = datetime.timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_MODE, default=DEFAULT_MODE): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
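# A matching (hypothetical) YAML configuration would look like:
#
# alarm_control_panel:
#   - platform: concord232
#     host: 192.168.1.10
#     port: 5007
#     mode: audible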
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Concord232 alarm control panel platform."""
name = config[CONF_NAME]
code = config.get(CONF_CODE)
mode = config[CONF_MODE]
host = config[CONF_HOST]
port = config[CONF_PORT]
url = f"http://{host}:{port}"
try:
add_entities([Concord232Alarm(url, name, code, mode)], True)
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to Concord232: %s", str(ex))
class Concord232Alarm(alarm.AlarmControlPanelEntity):
"""Representation of the Concord232-based alarm panel."""
def __init__(self, url, name, code, mode):
"""Initialize the Concord232 alarm panel."""
self._state = None
self._name = name
self._code = code
self._mode = mode
self._url = url
self._alarm = concord232_client.Client(self._url)
self._alarm.partitions = self._alarm.list_partitions()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def code_format(self):
"""Return the characters if code is defined."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def update(self):
"""Update values from API."""
try:
part = self._alarm.list_partitions()[0]
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to %(host)s: %(reason)s",
{"host": self._url, "reason": ex},
)
return
except IndexError:
_LOGGER.error("Concord232 reports no partitions")
return
if part["arming_level"] == "Off":
self._state = STATE_ALARM_DISARMED
elif "Home" in part["arming_level"]:
self._state = STATE_ALARM_ARMED_HOME
else:
self._state = STATE_ALARM_ARMED_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, STATE_ALARM_DISARMED):
return
self._alarm.disarm(code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, STATE_ALARM_ARMED_HOME):
return
if self._mode == "silent":
self._alarm.arm("stay", "silent")
else:
self._alarm.arm("stay")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, STATE_ALARM_ARMED_AWAY):
return
self._alarm.arm("away")
def _validate_code(self, code, state):
"""Validate given code."""
if self._code is None:
return True
if isinstance(self._code, str):
alarm_code = self._code
else:
alarm_code = self._code.render(from_state=self._state, to_state=state)
check = not alarm_code or code == alarm_code
if not check:
_LOGGER.warning("Invalid code given for %s", state)
return check
|
from datetime import date, timedelta
import logging
import recollect_waste
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_PICKUP_TYPES = "pickup_types"
ATTR_AREA_NAME = "area_name"
ATTR_NEXT_PICKUP_TYPES = "next_pickup_types"
ATTR_NEXT_PICKUP_DATE = "next_pickup_date"
CONF_PLACE_ID = "place_id"
CONF_SERVICE_ID = "service_id"
DEFAULT_NAME = "recollect_waste"
ICON = "mdi:trash-can-outline"
SCAN_INTERVAL = timedelta(days=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLACE_ID): cv.string,
vol.Required(CONF_SERVICE_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
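# A matching (hypothetical) YAML configuration would look like:
#
# sensor:
#   - platform: recollect_waste
#     place_id: "1234abcd"
#     service_id: "310"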
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Recollect Waste platform."""
client = recollect_waste.RecollectWasteClient(
config[CONF_PLACE_ID], config[CONF_SERVICE_ID]
)
# Ensure the client can connect to the API successfully
# with given place_id and service_id.
try:
client.get_next_pickup()
except recollect_waste.RecollectWasteException as ex:
_LOGGER.error("Recollect Waste platform error. %s", ex)
return
add_entities([RecollectWasteSensor(config.get(CONF_NAME), client)], True)
class RecollectWasteSensor(Entity):
"""Recollect Waste Sensor."""
def __init__(self, name, client):
"""Initialize the sensor."""
self._attributes = {}
self._name = name
self._state = None
self.client = client
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.client.place_id}{self.client.service_id}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
def update(self):
"""Update device state."""
try:
pickup_event_array = self.client.get_pickup_events(
date.today(), date.today() + timedelta(weeks=4)
)
except recollect_waste.RecollectWasteException as ex:
_LOGGER.error("Recollect Waste platform error. %s", ex)
else:
pickup_event = pickup_event_array[0]
next_pickup_event = pickup_event_array[1]
next_date = str(next_pickup_event.event_date)
self._state = pickup_event.event_date
self._attributes.update(
{
ATTR_PICKUP_TYPES: pickup_event.pickup_types,
ATTR_AREA_NAME: pickup_event.area_name,
ATTR_NEXT_PICKUP_TYPES: next_pickup_event.pickup_types,
ATTR_NEXT_PICKUP_DATE: next_date,
}
)
|
from redbot import core
from redbot.core import VersionInfo
def test_version_working():
assert hasattr(core, "__version__")
assert core.__version__[0] == "3"
# When adding more of these, ensure they are added in ascending order of precedence
version_tests = (
"3.0.0a32.post10.dev12",
"3.0.0rc1.dev1",
"3.0.0rc1",
"3.0.0",
"3.0.1",
"3.0.1.post1.dev1",
"3.0.1.post1",
"2018.10.6b21",
)
def test_version_info_str_parsing():
for version_str in version_tests:
assert version_str == str(VersionInfo.from_str(version_str))
def test_version_info_lt():
for next_idx, cur in enumerate(version_tests[:-1], start=1):
cur_test = VersionInfo.from_str(cur)
next_test = VersionInfo.from_str(version_tests[next_idx])
assert cur_test < next_test
def test_version_info_gt():
assert VersionInfo.from_str(version_tests[1]) > VersionInfo.from_str(version_tests[0])
|
import logging
import multiprocessing
import queue
import time
from gi.repository import GLib
from meld.matchers import myers
log = logging.getLogger(__name__)
class MatcherWorker(multiprocessing.Process):
END_TASK = -1
matcher_class = myers.InlineMyersSequenceMatcher
def __init__(self, tasks, results):
super().__init__()
self.tasks = tasks
self.results = results
self.daemon = True
def run(self):
while True:
task_id, (text1, textn) = self.tasks.get()
if task_id == self.END_TASK:
break
try:
matcher = self.matcher_class(None, text1, textn)
self.results.put((task_id, matcher.get_opcodes()))
except Exception as e:
log.error("Exception while running diff: %s", e)
time.sleep(0)
class CachedSequenceMatcher:
"""Simple class for caching diff results, with LRU-based eviction
Results from the SequenceMatcher are cached and timestamped, and
subsequently evicted based on least-recent generation/usage. The LRU-based
eviction is overly simplistic, but is okay for our usage pattern.
"""
TASK_GRACE_PERIOD = 1
def __init__(self, scheduler):
"""Create a new caching sequence matcher
:param scheduler: a `meld.task.SchedulerBase` used to schedule
sequence comparison result checks
"""
self.scheduler = scheduler
self.cache = {}
self.tasks = multiprocessing.Queue()
self.tasks.cancel_join_thread()
# Limiting the result queue here has the effect of giving us
# much better interactivity. Without this limit, the
# result-checker tends to get starved and all highlights get
# delayed until we're almost completely finished.
self.results = multiprocessing.Queue(5)
self.results.cancel_join_thread()
self.thread = MatcherWorker(self.tasks, self.results)
self.task_id = 1
self.queued_matches = {}
GLib.idle_add(self.thread.start)
def __del__(self):
self.tasks.put((MatcherWorker.END_TASK, ('', '')))
self.thread.join(self.TASK_GRACE_PERIOD)
if self.thread.exitcode is None:
self.thread.terminate()
def match(self, text1, textn, cb):
texts = (text1, textn)
try:
self.cache[texts][1] = time.time()
opcodes = self.cache[texts][0]
GLib.idle_add(lambda: cb(opcodes))
except KeyError:
GLib.idle_add(lambda: self.enqueue_task(texts, cb))
def enqueue_task(self, texts, cb):
if not bool(self.queued_matches):
self.scheduler.add_task(self.check_results)
self.queued_matches[self.task_id] = (texts, cb)
self.tasks.put((self.task_id, texts))
self.task_id += 1
def check_results(self):
try:
task_id, opcodes = self.results.get(block=True, timeout=0.01)
texts, cb = self.queued_matches.pop(task_id)
self.cache[texts] = [opcodes, time.time()]
GLib.idle_add(lambda: cb(opcodes))
except queue.Empty:
pass
return bool(self.queued_matches)
def clean(self, size_hint):
"""Clean the cache if necessary
@param size_hint: the recommended minimum number of cache entries
"""
if len(self.cache) <= size_hint * 3:
return
items = list(self.cache.items())
items.sort(key=lambda it: it[1][1])
for item in items[:-size_hint * 2]:
del self.cache[item[0]]
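        # Eviction maths with hypothetical numbers: for size_hint=10 and 40
        # cached entries, the 20 least recently used entries (items[:-20])
        # are deleted, keeping the 20 most recently used.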
|
import json
import mock
import pytest
from kazoo.client import KazooClient
from kazoo.exceptions import BadVersionError
from kazoo.exceptions import NodeExistsError
from kazoo.exceptions import NoNodeError
from paasta_tools.frameworks.task_store import DictTaskStore
from paasta_tools.frameworks.task_store import MesosTaskParameters
from paasta_tools.frameworks.task_store import ZKTaskStore
def test_DictTaskStore():
task_store = DictTaskStore(
service_name="foo",
instance_name="bar",
framework_id="foo",
system_paasta_config=None,
)
task_store.add_task_if_doesnt_exist("task_id", mesos_task_state="foo")
task_store.update_task("task_id", is_draining=True)
assert task_store.get_all_tasks() == {
"task_id": MesosTaskParameters(mesos_task_state="foo", is_draining=True)
}
task_store.update_task("task_id", mesos_task_state="bar")
assert task_store.get_all_tasks() == {
"task_id": MesosTaskParameters(mesos_task_state="bar", is_draining=True)
}
class TestMesosTaskParameters:
def test_serdes(self):
param_dict = {
"health": "health",
"mesos_task_state": "mesos_task_state",
"is_draining": True,
"is_healthy": True,
"offer": "offer",
"resources": "resources",
}
assert json.loads(MesosTaskParameters(**param_dict).serialize()) == param_dict
assert MesosTaskParameters.deserialize(
json.dumps(param_dict)
) == MesosTaskParameters(**param_dict)
class TestZKTaskStore:
@pytest.yield_fixture
def mock_zk_client(self):
spec_zk_client = KazooClient()
mock_zk_client = mock.Mock(spec=spec_zk_client)
with mock.patch(
"paasta_tools.frameworks.task_store.KazooClient",
autospec=True,
return_value=mock_zk_client,
):
yield mock_zk_client
def test_get_task(self, mock_zk_client):
zk_task_store = ZKTaskStore(
service_name="a",
instance_name="b",
framework_id="c",
system_paasta_config=mock.Mock(),
)
fake_znodestat = mock.Mock()
zk_task_store.zk_client.get.return_value = (
'{"health": "healthy"}',
fake_znodestat,
)
params, stat = zk_task_store._get_task("d")
zk_task_store.zk_client.get.assert_called_once_with("/d")
assert stat == fake_znodestat
assert params.health == "healthy"
def test_update_task(self, mock_zk_client):
zk_task_store = ZKTaskStore(
service_name="a",
instance_name="b",
framework_id="c",
system_paasta_config=mock.Mock(),
)
# Happy case - task exists, no conflict on update.
fake_znodestat = mock.Mock(version=1)
zk_task_store.zk_client.get.return_value = (
'{"health": "healthy"}',
fake_znodestat,
)
new_params = zk_task_store.update_task("task_id", is_draining=True)
assert new_params.is_draining is True
assert new_params.health == "healthy"
# Second happy case - no task exists.
fake_znodestat = mock.Mock(version=1)
zk_task_store.zk_client.get.side_effect = NoNodeError()
new_params = zk_task_store.update_task("task_id", is_draining=True)
assert new_params.is_draining is True
assert new_params.health is None
# Someone changed our data out from underneath us.
zk_task_store.zk_client.get.reset_mock()
zk_task_store.zk_client.set.reset_mock()
zk_task_store.zk_client.get.side_effect = [
('{"health": "healthy"}', mock.Mock(version=1)),
('{"health": "healthy", "offer": "offer"}', mock.Mock(version=2)),
(
'{"health": "healthy", "offer": "offer", "resources": "resources"}',
mock.Mock(version=3),
),
]
zk_task_store.zk_client.set.side_effect = [
BadVersionError,
BadVersionError,
None,
]
new_params = zk_task_store.update_task("task_id", is_draining=True)
assert zk_task_store.zk_client.get.call_count == 3
zk_task_store.zk_client.get.assert_has_calls(
[mock.call("/task_id"), mock.call("/task_id"), mock.call("/task_id")]
)
assert zk_task_store.zk_client.set.call_count == 3
zk_task_store.zk_client.set.assert_has_calls(
[
mock.call("/task_id", mock.ANY, version=1),
mock.call("/task_id", mock.ANY, version=2),
mock.call("/task_id", mock.ANY, version=3),
]
)
assert new_params.is_draining is True
assert new_params.health == "healthy"
assert new_params.offer == "offer"
assert new_params.resources == "resources"
# Data wasn't there when we read it, but then was when we tried to create it
zk_task_store.zk_client.get.reset_mock()
zk_task_store.zk_client.set.reset_mock()
zk_task_store.zk_client.create.reset_mock()
zk_task_store.zk_client.get.side_effect = [
NoNodeError,
('{"health": "healthy"}', mock.Mock(version=1)),
]
zk_task_store.zk_client.create.side_effect = [NodeExistsError]
zk_task_store.zk_client.set.side_effect = [None]
new_params = zk_task_store.update_task("task_id", is_draining=True)
assert zk_task_store.zk_client.get.call_count == 2
zk_task_store.zk_client.get.assert_has_calls(
[mock.call("/task_id"), mock.call("/task_id")]
)
assert zk_task_store.zk_client.create.call_count == 1
zk_task_store.zk_client.create.assert_has_calls(
[mock.call("/task_id", mock.ANY)]
)
assert zk_task_store.zk_client.set.call_count == 1
zk_task_store.zk_client.set.assert_has_calls(
[mock.call("/task_id", mock.ANY, version=1)]
)
assert new_params.is_draining is True
assert new_params.health == "healthy"
assert new_params.offer is None
|
import argparse
import time
from typing import Any
from typing import Iterable
from typing import Mapping
from typing import MutableMapping
from typing import NamedTuple
from typing import Optional
from typing import Tuple
import a_sync
import simplejson as json
from kubernetes.client import V1Pod
from kubernetes.client import V1ResourceRequirements
from paasta_tools import kubernetes_tools
from paasta_tools import mesos_tools
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.mesos.exceptions import SlaveDoesNotExist
from paasta_tools.mesos.task import Task
from paasta_tools.utils import load_system_paasta_config
MAIN_CONTAINER_TYPE = "main"
class TaskAllocationInfo(NamedTuple):
paasta_service: str
paasta_instance: str
container_type: str
paasta_pool: str
resources: Mapping[str, float]
start_time: float
docker_id: str
pod_name: str
pod_ip: str
host_ip: str
mesos_container_id: str # Because Mesos task info does not have docker id
def get_container_info_from_mesos_task(
task: Task,
) -> Tuple[Optional[str], Optional[float]]:
for status in task["statuses"]:
if status["state"] != "TASK_RUNNING":
continue
container_id = (
status.get("container_status", {}).get("container_id", {}).get("value")
)
time_start = status.get("timestamp")
return container_id, time_start
return None, None
def get_paasta_service_instance_from_mesos_task(
task: Task,
) -> Tuple[Optional[str], Optional[str]]:
try:
docker_params = task["container"].get("docker", {}).get("parameters", [])
except KeyError:
return None, None
service, instance = None, None
for param in docker_params:
if param["key"] == "label":
label = param["value"]
if label.startswith("paasta_service="):
service = label.split("=")[1]
if label.startswith("paasta_instance="):
instance = label.split("=")[1]
return service, instance
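# Illustrative shape of the docker parameters parsed above (values are hypothetical):
#   [{"key": "label", "value": "paasta_service=example_service"},
#    {"key": "label", "value": "paasta_instance=main"}]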
async def get_pool_from_mesos_task(task: Task) -> Optional[str]:
try:
attributes = (await task.slave())["attributes"]
return attributes.get("pool", "default")
except SlaveDoesNotExist:
return None
@a_sync.to_blocking
async def get_mesos_task_allocation_info() -> Iterable[TaskAllocationInfo]:
tasks = await mesos_tools.get_cached_list_of_running_tasks_from_frameworks()
info_list = []
for task in tasks:
mesos_container_id, start_time = get_container_info_from_mesos_task(task)
paasta_service, paasta_instance = get_paasta_service_instance_from_mesos_task(
task
)
paasta_pool = await get_pool_from_mesos_task(task)
info_list.append(
TaskAllocationInfo(
paasta_service=paasta_service,
paasta_instance=paasta_instance,
container_type=MAIN_CONTAINER_TYPE,
paasta_pool=paasta_pool,
resources=task["resources"],
start_time=start_time,
docker_id=None,
pod_name=None,
pod_ip=None,
host_ip=None,
mesos_container_id=mesos_container_id,
)
)
return info_list
def get_all_running_kubernetes_pods(
kube_client: KubeClient, namespace: str
) -> Iterable[V1Pod]:
running = []
for pod in kubernetes_tools.get_all_pods(kube_client, namespace):
if kubernetes_tools.get_pod_status(pod) == kubernetes_tools.PodStatus.RUNNING:
running.append(pod)
return running
def get_kubernetes_resource_request(
resources: V1ResourceRequirements,
) -> Mapping[str, float]:
if not resources:
requests: Mapping[str, str] = {}
else:
requests = resources.requests or {}
parsed = kubernetes_tools.parse_container_resources(requests)
return {
"cpus": parsed.cpus,
"mem": parsed.mem,
"disk": parsed.disk,
}
def get_kubernetes_metadata(
pod: V1Pod,
) -> Tuple[
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
]:
labels = pod.metadata.labels or {}
node_selectors = pod.spec.node_selector or {}
pod_name = pod.metadata.name
pod_ip = pod.status.pod_ip
host_ip = pod.status.host_ip
service = labels.get("paasta.yelp.com/service")
instance = labels.get("paasta.yelp.com/instance")
pool = node_selectors.get("yelp.com/pool", "default")
return service, instance, pool, pod_name, pod_ip, host_ip
def get_container_type(container_name: str, instance_name: str) -> str:
"""
To differentiate between main service containers and sidecars
"""
if instance_name and container_name == kubernetes_tools.sanitise_kubernetes_name(
instance_name
):
return MAIN_CONTAINER_TYPE
else:
return container_name
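# For example, a container whose name equals the sanitised instance name is reported
# as the "main" container; any other (sidecar) container keeps its own name.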
def get_kubernetes_task_allocation_info(namespace: str) -> Iterable[TaskAllocationInfo]:
client = KubeClient()
pods = get_all_running_kubernetes_pods(client, namespace)
info_list = []
for pod in pods:
service, instance, pool, pod_name, pod_ip, host_ip = get_kubernetes_metadata(
pod
)
name_to_info: MutableMapping[str, Any] = {}
for container in pod.spec.containers:
name_to_info[container.name] = {
"resources": get_kubernetes_resource_request(container.resources),
"container_type": get_container_type(container.name, instance),
"pod_name": pod_name,
"pod_ip": pod_ip,
"host_ip": host_ip,
}
container_statuses = pod.status.container_statuses or []
for container in container_statuses:
if not container.state.running:
continue
# docker://abcdef
docker_id = (
container.container_id.split("/")[-1]
if container.container_id
else None
)
update = {
"docker_id": docker_id,
"start_time": container.state.running.started_at.timestamp(),
}
name_to_info[container.name].update(update)
for info in name_to_info.values():
info_list.append(
TaskAllocationInfo(
paasta_service=service,
paasta_instance=instance,
container_type=info.get("container_type"),
paasta_pool=pool,
resources=info.get("resources"),
start_time=info.get("start_time"),
docker_id=info.get("docker_id"),
pod_name=info.get("pod_name"),
pod_ip=info.get("pod_ip"),
host_ip=info.get("host_ip"),
mesos_container_id=None,
)
)
return info_list
def get_task_allocation_info(
scheduler: str, namespace: str
) -> Iterable[TaskAllocationInfo]:
if scheduler == "mesos":
return get_mesos_task_allocation_info()
elif scheduler == "kubernetes":
return get_kubernetes_task_allocation_info(namespace)
else:
return []
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--scheduler",
help="Scheduler to get task info from",
dest="scheduler",
default="mesos",
choices=["mesos", "kubernetes"],
)
parser.add_argument(
"--namespace-prefix",
help="prefix of the namespace to fetch the logs for"
"Used only when scheduler is kubernetes",
dest="namespace_prefix",
default="paasta",
)
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
cluster = load_system_paasta_config().get_cluster()
if args.scheduler == "mesos":
display_task_allocation_info(cluster, args.scheduler, args.namespace_prefix)
else:
client = KubeClient()
all_namespaces = kubernetes_tools.get_all_namespaces(client)
matching_namespaces = [
n for n in all_namespaces if n.startswith(args.namespace_prefix)
]
for matching_namespace in matching_namespaces:
display_task_allocation_info(cluster, args.scheduler, matching_namespace)
def display_task_allocation_info(cluster, scheduler, namespace):
info_list = get_task_allocation_info(scheduler, namespace)
timestamp = time.time()
for info in info_list:
info_dict = info._asdict()
info_dict["cluster"] = cluster
info_dict["timestamp"] = timestamp
print(json.dumps(info_dict))
if __name__ == "__main__":
args = parse_args()
main(args)
|
import pytest
from hangups import channel
@pytest.mark.parametrize('input_,expected', [
(b'79\n[[0,["c","98803CAAD92268E8","",8]\n]\n,'
b'[1,[{"gsid":"7tCoFHumSL-IT6BHpCaxLA"}]]\n]\n',
('98803CAAD92268E8', '7tCoFHumSL-IT6BHpCaxLA')),
])
def test_parse_sid_response(input_, expected):
assert channel._parse_sid_response(input_) == expected
@pytest.mark.parametrize('input_,expected', [
# '€' is 3 bytes in UTF-8.
('€€'.encode()[:6], '€€'),
('€€'.encode()[:5], '€'),
('€€'.encode()[:4], '€'),
('€€'.encode()[:3], '€'),
('€€'.encode()[:2], ''),
('€€'.encode()[:1], ''),
('€€'.encode()[:0], ''),
])
def test_best_effort_decode(input_, expected):
assert channel._best_effort_decode(input_) == expected
def test_simple():
p = channel.ChunkParser()
assert list(p.get_chunks('10\n01234567893\nabc'.encode())) == [
'0123456789',
'abc',
]
def test_truncated_message():
p = channel.ChunkParser()
assert list(p.get_chunks('12\n012345678'.encode())) == []
def test_junk_before_length():
p = channel.ChunkParser()
assert list(p.get_chunks('junk4\nfail'.encode())) == []
def test_truncated_length():
p = channel.ChunkParser()
assert list(p.get_chunks('13'.encode())) == []
def test_malformed_length():
p = channel.ChunkParser()
# TODO: could detect errors like these with some extra work
assert list(p.get_chunks('11\n0123456789\n5e\n"abc"'.encode())) == [
'0123456789\n'
]
def test_incremental():
p = channel.ChunkParser()
assert list(p.get_chunks(''.encode())) == []
assert list(p.get_chunks('5'.encode())) == []
assert list(p.get_chunks('\n'.encode())) == []
assert list(p.get_chunks('abc'.encode())) == []
assert list(p.get_chunks('de'.encode())) == ['abcde']
assert list(p.get_chunks(''.encode())) == []
def test_unicode():
p = channel.ChunkParser()
# smile is actually 2 code units
assert list(p.get_chunks('3\na😀'.encode())) == ['a😀']
def test_split_characters():
p = channel.ChunkParser()
assert list(p.get_chunks(b'1\n\xe2\x82')) == []
assert list(p.get_chunks(b'\xac')) == ['€']
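def test_streaming_usage_sketch():
    # Minimal usage sketch: ChunkParser keeps partial data between calls, so it can
    # be fed arbitrarily sized network-read pieces (chunk lengths count text units of
    # the decoded data, as the unicode tests above suggest).
    p = channel.ChunkParser()
    received = []
    for piece in (b'5\nab', b'cde', b'3\nfgh'):
        received.extend(p.get_chunks(piece))
    assert received == ['abcde', 'fgh']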
|
from __future__ import absolute_import, unicode_literals
import os
import pytest
import kaptan
from libtmux import Window
from libtmux.common import has_gte_version
from libtmux.test import retry, temp_session
from tmuxp import config, exc
from tmuxp._compat import text_type
from tmuxp.workspacebuilder import WorkspaceBuilder
from . import example_dir, fixtures_dir
from .fixtures._util import loadfixture
def test_split_windows(session):
yaml_config = loadfixture("workspacebuilder/two_pane.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
builder = WorkspaceBuilder(sconf=sconfig)
window_count = len(session._windows) # current window count
assert len(s._windows) == window_count
for w, wconf in builder.iter_create_windows(s):
for p in builder.iter_create_panes(w, wconf):
w.select_layout('tiled') # fix glitch with pane size
p = p
assert len(s._windows) == window_count
assert isinstance(w, Window)
assert len(s._windows) == window_count
window_count += 1
def test_split_windows_three_pane(session):
yaml_config = loadfixture("workspacebuilder/three_pane.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
builder = WorkspaceBuilder(sconf=sconfig)
window_count = len(s._windows) # current window count
assert len(s._windows) == window_count
for w, wconf in builder.iter_create_windows(s):
for p in builder.iter_create_panes(w, wconf):
w.select_layout('tiled') # fix glitch with pane size
p = p
assert len(s._windows) == window_count
assert isinstance(w, Window)
assert len(s._windows) == window_count
window_count += 1
w.set_window_option('main-pane-height', 50)
w.select_layout(wconf['layout'])
def test_focus_pane_index(session):
yaml_config = loadfixture('workspacebuilder/focus_and_pane.yaml')
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert session.attached_window.name == 'focused window'
pane_base_index = int(
session.attached_window.show_window_option('pane-base-index', g=True)
)
if not pane_base_index:
pane_base_index = 0
else:
pane_base_index = int(pane_base_index)
# get the pane index for each pane
pane_base_indexes = []
for pane in session.attached_window.panes:
pane_base_indexes.append(int(pane.index))
pane_indexes_should_be = [pane_base_index + x for x in range(0, 3)]
assert pane_indexes_should_be == pane_base_indexes
w = session.attached_window
assert w.name != 'man'
pane_path = '/usr'
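    # retry() keeps returning True until a timeout, so this loop polls tmux until
    # the pane reports the expected working directory (or the retry window runs out).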
while retry():
p = w.attached_pane
p.server._update_panes()
if p.current_path == pane_path:
break
assert p.current_path == pane_path
proc = session.cmd('show-option', '-gv', 'base-index')
base_index = int(proc.stdout[0])
session.server._update_windows()
window3 = session.find_where({'window_index': str(base_index + 2)})
assert isinstance(window3, Window)
p = None
pane_path = '/'
while retry():
p = window3.attached_pane
p.server._update_panes()
if p.current_path == pane_path:
break
assert p.current_path == pane_path
@pytest.mark.skip(
reason='''
Test needs to be rewritten, assertion not reliable across platforms
and CI. See https://github.com/tmux-python/tmuxp/issues/310.
'''.strip()
)
def test_suppress_history(session):
yaml_config = loadfixture("workspacebuilder/suppress_history.yaml")
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
inHistoryWindow = session.find_where({'window_name': 'inHistory'})
isMissingWindow = session.find_where({'window_name': 'isMissing'})
def assertHistory(cmd, hist):
return 'inHistory' in cmd and cmd.endswith(hist)
def assertIsMissing(cmd, hist):
return 'isMissing' in cmd and not cmd.endswith(hist)
for w, window_name, assertCase in [
(inHistoryWindow, 'inHistory', assertHistory),
(isMissingWindow, 'isMissing', assertIsMissing),
]:
assert w.name == window_name
correct = False
w.select_window()
p = w.attached_pane
p.select_pane()
# Print the last-in-history command in the pane
p.cmd('send-keys', ' fc -ln -1')
p.cmd('send-keys', 'Enter')
buffer_name = 'test'
while retry():
# from v0.7.4 libtmux session.cmd adds target -t self.id by default
# show-buffer doesn't accept -t, use global cmd.
# Get the contents of the pane
p.cmd('capture-pane', '-b', buffer_name)
captured_pane = session.server.cmd('show-buffer', '-b', buffer_name)
session.server.cmd('delete-buffer', '-b', buffer_name)
# Parse the sent and last-in-history commands
sent_cmd = captured_pane.stdout[0].strip()
history_cmd = captured_pane.stdout[-2].strip()
if assertCase(sent_cmd, history_cmd):
correct = True
break
assert correct, "Unknown sent command: [%s] in %s" % (sent_cmd, assertCase)
def test_session_options(session):
yaml_config = loadfixture("workspacebuilder/session_options.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert "/bin/sh" in s.show_option('default-shell')
assert "/bin/sh" in s.show_option('default-command')
def test_global_options(session):
yaml_config = loadfixture("workspacebuilder/global_options.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert "top" in s.show_option('status-position', _global=True)
assert 493 == s.show_option('repeat-time', _global=True)
def test_global_session_env_options(session, monkeypatch):
visual_silence = 'on'
monkeypatch.setenv(str('VISUAL_SILENCE'), str(visual_silence))
repeat_time = 738
monkeypatch.setenv(str('REPEAT_TIME'), str(repeat_time))
main_pane_height = 8
monkeypatch.setenv(str('MAIN_PANE_HEIGHT'), str(main_pane_height))
yaml_config = loadfixture("workspacebuilder/env_var_options.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert visual_silence in s.show_option('visual-silence', _global=True)
assert repeat_time == s.show_option('repeat-time')
assert main_pane_height == s.attached_window.show_window_option('main-pane-height')
def test_window_options(session):
yaml_config = loadfixture("workspacebuilder/window_options.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
if has_gte_version('2.3'):
sconfig['windows'][0]['options']['pane-border-format'] = ' #P '
builder = WorkspaceBuilder(sconf=sconfig)
window_count = len(session._windows) # current window count
assert len(s._windows) == window_count
for w, wconf in builder.iter_create_windows(s):
for p in builder.iter_create_panes(w, wconf):
w.select_layout('tiled') # fix glitch with pane size
p = p
assert len(s._windows) == window_count
assert isinstance(w, Window)
assert w.show_window_option('main-pane-height') == 5
if has_gte_version('2.3'):
assert w.show_window_option('pane-border-format') == ' #P '
assert len(s._windows) == window_count
window_count += 1
w.select_layout(wconf['layout'])
@pytest.mark.flaky(reruns=5)
def test_window_options_after(session):
yaml_config = loadfixture("workspacebuilder/window_options_after.yaml")
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
def assert_last_line(p, s):
correct = False
while retry():
pane_out = p.cmd('capture-pane', '-p', '-J').stdout
while not pane_out[-1].strip(): # delete trailing lines tmux 1.8
pane_out.pop()
if len(pane_out) > 1 and pane_out[-2].strip() == s:
correct = True
break
# Print output for easier debugging if assertion fails
if not correct:
print('\n'.join(pane_out))
return correct
for i, pane in enumerate(session.attached_window.panes):
assert assert_last_line(
pane, str(i)
), "Initial command did not execute properly/" + str(i)
pane.cmd('send-keys', 'Up') # Will repeat echo
pane.enter() # in each iteration
assert assert_last_line(
pane, str(i)
), "Repeated command did not execute properly/" + str(i)
session.cmd('send-keys', ' echo moo')
session.cmd('send-keys', 'Enter')
for pane in session.attached_window.panes:
assert assert_last_line(
pane, 'moo'
), "Synchronized command did not execute properly"
def test_window_shell(session):
yaml_config = loadfixture("workspacebuilder/window_shell.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
for w, wconf in builder.iter_create_windows(s):
if 'window_shell' in wconf:
assert wconf['window_shell'] == text_type('top')
while retry():
session.server._update_windows()
if w['window_name'] != 'top':
break
assert w.name != text_type('top')
def test_environment_variables(session):
yaml_config = loadfixture("workspacebuilder/environment_vars.yaml")
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session)
assert session.show_environment('FOO') == 'BAR'
assert session.show_environment('PATH') == '/tmp'
def test_automatic_rename_option(session):
"""With option automatic-rename: on."""
yaml_config = loadfixture("workspacebuilder/window_automatic_rename.yaml")
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
builder = WorkspaceBuilder(sconf=sconfig)
window_count = len(session._windows) # current window count
assert len(s._windows) == window_count
for w, wconf in builder.iter_create_windows(s):
for p in builder.iter_create_panes(w, wconf):
w.select_layout('tiled') # fix glitch with pane size
p = p
        assert len(s._windows) == window_count
assert isinstance(w, Window)
assert w.show_window_option('automatic-rename') == 'on'
assert len(s._windows) == window_count
window_count += 1
w.select_layout(wconf['layout'])
assert s.name != 'tmuxp'
w = s.windows[0]
while retry():
session.server._update_windows()
if w.name != 'sh':
break
assert w.name != 'sh'
pane_base_index = w.show_window_option('pane-base-index', g=True)
w.select_pane(pane_base_index)
while retry():
session.server._update_windows()
if w.name == 'sh':
break
assert w.name == text_type('sh')
w.select_pane('-D')
while retry():
session.server._update_windows()
if w['window_name'] != 'sh':
break
assert w.name != text_type('sh')
def test_blank_pane_count(session):
""":todo: Verify blank panes of various types build into workspaces."""
yaml_config_file = os.path.join(example_dir, 'blank-panes.yaml')
test_config = kaptan.Kaptan().import_config(yaml_config_file).get()
test_config = config.expand(test_config)
builder = WorkspaceBuilder(sconf=test_config)
builder.build(session=session)
assert session == builder.session
window1 = session.find_where({'window_name': 'Blank pane test'})
assert len(window1._panes) == 3
window2 = session.find_where({'window_name': 'More blank panes'})
assert len(window2._panes) == 3
window3 = session.find_where({'window_name': 'Empty string (return)'})
assert len(window3._panes) == 3
window4 = session.find_where({'window_name': 'Blank with options'})
assert len(window4._panes) == 2
def test_start_directory(session, tmpdir):
yaml_config = loadfixture("workspacebuilder/start_directory.yaml")
test_dir = str(tmpdir.mkdir('foo bar'))
test_config = yaml_config.format(TMP_DIR=str(tmpdir), TEST_DIR=test_dir)
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(test_config).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert session == builder.session
dirs = ['/usr/bin', '/dev', test_dir, '/usr', '/usr']
for path, window in zip(dirs, session.windows):
for p in window.panes:
while retry():
p.server._update_panes()
pane_path = p.current_path
if pane_path is None:
pass
elif path in pane_path or pane_path == path:
result = path == pane_path or path in pane_path
break
# handle case with OS X adding /private/ to /tmp/ paths
assert result
def test_start_directory_relative(session, tmpdir):
"""Same as above test, but with relative start directory, mimicing
loading it from a location of project file. Like::
$ tmuxp load ~/workspace/myproject/.tmuxp.yaml
instead of::
$ cd ~/workspace/myproject/.tmuxp.yaml
$ tmuxp load .
"""
yaml_config = loadfixture("workspacebuilder/start_directory_relative.yaml")
test_dir = str(tmpdir.mkdir('foo bar'))
config_dir = str(tmpdir.mkdir('testRelConfigDir'))
test_config = yaml_config.format(TEST_DIR=test_dir)
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(test_config).get()
# the second argument of os.getcwd() mimics the behavior
# the CLI loader will do, but it passes in the config file's location.
sconfig = config.expand(sconfig, config_dir)
sconfig = config.trickle(sconfig)
assert os.path.exists(config_dir)
assert os.path.exists(test_dir)
builder = WorkspaceBuilder(sconf=sconfig)
builder.build(session=session)
assert session == builder.session
dirs = ['/usr/bin', '/dev', test_dir, config_dir, config_dir]
for path, window in zip(dirs, session.windows):
for p in window.panes:
while retry():
p.server._update_panes()
# Handle case where directories resolve to /private/ in OSX
pane_path = p.current_path
if pane_path is None:
pass
elif path in pane_path or pane_path == path:
result = path == pane_path or path in pane_path
break
assert result
def test_pane_order(session):
"""Pane ordering based on position in config and ``pane_index``.
Regression test for https://github.com/tmux-python/tmuxp/issues/15.
"""
yaml_config = loadfixture("workspacebuilder/pane_ordering.yaml").format(
HOME=os.path.realpath(os.path.expanduser('~'))
)
    # test order of `panes` (and pane_index) above against pane_paths
pane_paths = [
'/usr/bin',
'/usr',
'/usr/sbin',
os.path.realpath(os.path.expanduser('~')),
]
s = session
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
window_count = len(session._windows) # current window count
assert len(s._windows) == window_count
for w, wconf in builder.iter_create_windows(s):
for p in builder.iter_create_panes(w, wconf):
w.select_layout('tiled') # fix glitch with pane size
p = p
assert len(s._windows) == window_count
assert isinstance(w, Window)
assert len(s._windows) == window_count
window_count += 1
for w in session.windows:
pane_base_index = w.show_window_option('pane-base-index', g=True)
for p_index, p in enumerate(w.list_panes(), start=pane_base_index):
assert int(p_index) == int(p.index)
# pane-base-index start at base-index, pane_paths always start
# at 0 since python list.
pane_path = pane_paths[p_index - pane_base_index]
while retry():
p.server._update_panes()
if p.current_path == pane_path:
break
            assert p.current_path == pane_path
def test_window_index(session):
yaml_config = loadfixture("workspacebuilder/window_index.yaml")
proc = session.cmd('show-option', '-gv', 'base-index')
base_index = int(proc.stdout[0])
name_index_map = {'zero': 0 + base_index, 'one': 1 + base_index, 'five': 5}
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
for window, _ in builder.iter_create_windows(session):
expected_index = name_index_map[window['window_name']]
assert int(window['window_index']) == expected_index
def test_before_load_throw_error_if_retcode_error(server):
config_script_fails = loadfixture("workspacebuilder/config_script_fails.yaml")
sconfig = kaptan.Kaptan(handler='yaml')
yaml = config_script_fails.format(
fixtures_dir=fixtures_dir,
script_failed=os.path.join(fixtures_dir, 'script_failed.sh'),
)
sconfig = sconfig.import_config(yaml).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
with temp_session(server) as sess:
session_name = sess.name
with pytest.raises(exc.BeforeLoadScriptError):
builder.build(session=sess)
result = server.has_session(session_name)
assert not result, "Kills session if before_script exits with errcode"
def test_before_load_throw_error_if_file_not_exists(server):
config_script_not_exists = loadfixture(
"workspacebuilder/config_script_not_exists.yaml"
)
sconfig = kaptan.Kaptan(handler='yaml')
yaml = config_script_not_exists.format(
fixtures_dir=fixtures_dir,
script_not_exists=os.path.join(fixtures_dir, 'script_not_exists.sh'),
)
sconfig = sconfig.import_config(yaml).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
with temp_session(server) as sess:
session_name = sess.name
temp_session_exists = server.has_session(sess.name)
assert temp_session_exists
with pytest.raises((exc.BeforeLoadScriptNotExists, OSError)) as excinfo:
builder.build(session=sess)
excinfo.match(r'No such file or directory')
result = server.has_session(session_name)
assert not result, "Kills session if before_script doesn't exist"
def test_before_load_true_if_test_passes(server):
config_script_completes = loadfixture(
"workspacebuilder/config_script_completes.yaml"
)
assert os.path.exists(os.path.join(fixtures_dir, 'script_complete.sh'))
sconfig = kaptan.Kaptan(handler='yaml')
yaml = config_script_completes.format(
fixtures_dir=fixtures_dir,
script_complete=os.path.join(fixtures_dir, 'script_complete.sh'),
)
sconfig = sconfig.import_config(yaml).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
with temp_session(server) as session:
builder.build(session=session)
def test_before_load_true_if_test_passes_with_args(server):
config_script_completes = loadfixture(
"workspacebuilder/config_script_completes.yaml"
)
assert os.path.exists(os.path.join(fixtures_dir, 'script_complete.sh'))
sconfig = kaptan.Kaptan(handler='yaml')
yaml = config_script_completes.format(
fixtures_dir=fixtures_dir,
script_complete=os.path.join(fixtures_dir, 'script_complete.sh') + ' -v',
)
sconfig = sconfig.import_config(yaml).get()
sconfig = config.expand(sconfig)
sconfig = config.trickle(sconfig)
builder = WorkspaceBuilder(sconf=sconfig)
with temp_session(server) as session:
builder.build(session=session)
|
import asyncio
import base64
import io
import pytest
from homeassistant.components import camera
from homeassistant.components.camera.const import DOMAIN, PREF_PRELOAD_STREAM
from homeassistant.components.camera.prefs import CameraEntityPreferences
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, PropertyMock, mock_open, patch
from tests.components.camera import common
@pytest.fixture(name="mock_camera")
async def mock_camera_fixture(hass):
"""Initialize a demo camera platform."""
assert await async_setup_component(
hass, "camera", {camera.DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.demo.camera.Path.read_bytes",
return_value=b"Test",
):
yield
@pytest.fixture(name="mock_stream")
def mock_stream_fixture(hass):
"""Initialize a demo camera platform with streaming."""
assert hass.loop.run_until_complete(
async_setup_component(hass, "stream", {"stream": {}})
)
@pytest.fixture(name="setup_camera_prefs")
def setup_camera_prefs_fixture(hass):
"""Initialize HTTP API."""
return common.mock_camera_prefs(hass, "camera.demo_camera")
@pytest.fixture(name="image_mock_url")
async def image_mock_url_fixture(hass):
"""Fixture for get_image tests."""
await async_setup_component(
hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
async def test_get_image_from_camera(hass, image_mock_url):
"""Grab an image from camera entity."""
with patch(
"homeassistant.components.demo.camera.Path.read_bytes",
autospec=True,
return_value=b"Test",
) as mock_camera:
image = await camera.async_get_image(hass, "camera.demo_camera")
assert mock_camera.called
assert image.content == b"Test"
async def test_get_stream_source_from_camera(hass, mock_camera):
"""Fetch stream source from camera entity."""
with patch(
"homeassistant.components.camera.Camera.stream_source",
return_value="rtsp://127.0.0.1/stream",
) as mock_camera_stream_source:
stream_source = await camera.async_get_stream_source(hass, "camera.demo_camera")
assert mock_camera_stream_source.called
assert stream_source == "rtsp://127.0.0.1/stream"
async def test_get_image_without_exists_camera(hass, image_mock_url):
"""Try to get image without exists camera."""
with patch(
"homeassistant.helpers.entity_component.EntityComponent.get_entity",
return_value=None,
), pytest.raises(HomeAssistantError):
await camera.async_get_image(hass, "camera.demo_camera")
async def test_get_image_with_timeout(hass, image_mock_url):
"""Try to get image with timeout."""
with patch(
"homeassistant.components.demo.camera.DemoCamera.async_camera_image",
side_effect=asyncio.TimeoutError,
), pytest.raises(HomeAssistantError):
await camera.async_get_image(hass, "camera.demo_camera")
async def test_get_image_fails(hass, image_mock_url):
"""Try to get image with timeout."""
with patch(
"homeassistant.components.demo.camera.DemoCamera.async_camera_image",
return_value=None,
), pytest.raises(HomeAssistantError):
await camera.async_get_image(hass, "camera.demo_camera")
async def test_snapshot_service(hass, mock_camera):
"""Test snapshot service."""
mopen = mock_open()
with patch("homeassistant.components.camera.open", mopen, create=True), patch(
"homeassistant.components.camera.os.path.exists",
Mock(spec="os.path.exists", return_value=True),
), patch.object(hass.config, "is_allowed_path", return_value=True):
await hass.services.async_call(
camera.DOMAIN,
camera.SERVICE_SNAPSHOT,
{
ATTR_ENTITY_ID: "camera.demo_camera",
camera.ATTR_FILENAME: "/test/snapshot.jpg",
},
blocking=True,
)
mock_write = mopen().write
assert len(mock_write.mock_calls) == 1
assert mock_write.mock_calls[0][1][0] == b"Test"
async def test_websocket_camera_thumbnail(hass, hass_ws_client, mock_camera):
"""Test camera_thumbnail websocket command."""
await async_setup_component(hass, "camera", {})
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "camera_thumbnail", "entity_id": "camera.demo_camera"}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["content_type"] == "image/jpeg"
assert msg["result"]["content"] == base64.b64encode(b"Test").decode("utf-8")
async def test_websocket_stream_no_source(
hass, hass_ws_client, mock_camera, mock_stream
):
"""Test camera/stream websocket command."""
await async_setup_component(hass, "camera", {})
with patch(
"homeassistant.components.camera.request_stream",
return_value="http://home.assistant/playlist.m3u8",
) as mock_request_stream:
# Request playlist through WebSocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 6, "type": "camera/stream", "entity_id": "camera.demo_camera"}
)
msg = await client.receive_json()
# Assert WebSocket response
assert not mock_request_stream.called
assert msg["id"] == 6
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
async def test_websocket_camera_stream(hass, hass_ws_client, mock_camera, mock_stream):
"""Test camera/stream websocket command."""
await async_setup_component(hass, "camera", {})
with patch(
"homeassistant.components.camera.request_stream",
return_value="http://home.assistant/playlist.m3u8",
) as mock_request_stream, patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value="http://example.com",
):
# Request playlist through WebSocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 6, "type": "camera/stream", "entity_id": "camera.demo_camera"}
)
msg = await client.receive_json()
# Assert WebSocket response
assert mock_request_stream.called
assert msg["id"] == 6
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["url"][-13:] == "playlist.m3u8"
async def test_websocket_get_prefs(hass, hass_ws_client, mock_camera):
"""Test get camera preferences websocket command."""
await async_setup_component(hass, "camera", {})
# Request preferences through websocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 7, "type": "camera/get_prefs", "entity_id": "camera.demo_camera"}
)
msg = await client.receive_json()
# Assert WebSocket response
assert msg["success"]
async def test_websocket_update_prefs(
hass, hass_ws_client, mock_camera, setup_camera_prefs
):
"""Test updating preference."""
await async_setup_component(hass, "camera", {})
assert setup_camera_prefs[PREF_PRELOAD_STREAM]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 8,
"type": "camera/update_prefs",
"entity_id": "camera.demo_camera",
"preload_stream": False,
}
)
response = await client.receive_json()
assert response["success"]
assert not setup_camera_prefs[PREF_PRELOAD_STREAM]
assert (
response["result"][PREF_PRELOAD_STREAM]
== setup_camera_prefs[PREF_PRELOAD_STREAM]
)
async def test_play_stream_service_no_source(hass, mock_camera, mock_stream):
"""Test camera play_stream service."""
data = {
ATTR_ENTITY_ID: "camera.demo_camera",
camera.ATTR_MEDIA_PLAYER: "media_player.test",
}
with patch("homeassistant.components.camera.request_stream"), pytest.raises(
HomeAssistantError
):
# Call service
await hass.services.async_call(
camera.DOMAIN, camera.SERVICE_PLAY_STREAM, data, blocking=True
)
async def test_handle_play_stream_service(hass, mock_camera, mock_stream):
"""Test camera play_stream service."""
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
await async_setup_component(hass, "media_player", {})
with patch(
"homeassistant.components.camera.request_stream"
) as mock_request_stream, patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value="http://example.com",
):
# Call service
await hass.services.async_call(
camera.DOMAIN,
camera.SERVICE_PLAY_STREAM,
{
ATTR_ENTITY_ID: "camera.demo_camera",
camera.ATTR_MEDIA_PLAYER: "media_player.test",
},
blocking=True,
)
# So long as we request the stream, the rest should be covered
# by the play_media service tests.
assert mock_request_stream.called
async def test_no_preload_stream(hass, mock_stream):
"""Test camera preload preference."""
demo_prefs = CameraEntityPreferences({PREF_PRELOAD_STREAM: False})
with patch(
"homeassistant.components.camera.request_stream"
) as mock_request_stream, patch(
"homeassistant.components.camera.prefs.CameraPreferences.get",
return_value=demo_prefs,
), patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
new_callable=PropertyMock,
) as mock_stream_source:
mock_stream_source.return_value = io.BytesIO()
await async_setup_component(hass, "camera", {DOMAIN: {"platform": "demo"}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert not mock_request_stream.called
async def test_preload_stream(hass, mock_stream):
"""Test camera preload preference."""
demo_prefs = CameraEntityPreferences({PREF_PRELOAD_STREAM: True})
with patch(
"homeassistant.components.camera.request_stream"
) as mock_request_stream, patch(
"homeassistant.components.camera.prefs.CameraPreferences.get",
return_value=demo_prefs,
), patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value="http://example.com",
):
assert await async_setup_component(
hass, "camera", {DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert mock_request_stream.called
async def test_record_service_invalid_path(hass, mock_camera):
"""Test record service with invalid path."""
with patch.object(
hass.config, "is_allowed_path", return_value=False
), pytest.raises(HomeAssistantError):
# Call service
await hass.services.async_call(
camera.DOMAIN,
camera.SERVICE_RECORD,
{
ATTR_ENTITY_ID: "camera.demo_camera",
camera.CONF_FILENAME: "/my/invalid/path",
},
blocking=True,
)
async def test_record_service(hass, mock_camera, mock_stream):
"""Test record service."""
with patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value="http://example.com",
), patch(
"homeassistant.components.stream.async_handle_record_service",
) as mock_record_service, patch.object(
hass.config, "is_allowed_path", return_value=True
):
# Call service
await hass.services.async_call(
camera.DOMAIN,
camera.SERVICE_RECORD,
{ATTR_ENTITY_ID: "camera.demo_camera", camera.CONF_FILENAME: "/my/path"},
blocking=True,
)
# So long as we call stream.record, the rest should be covered
# by those tests.
assert mock_record_service.called
|
from openzwavemqtt.const import CommandClass, ValueIndex, ValueType
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_UNSUBSCRIBE, DOMAIN
from .entity import ZWaveDeviceEntity
NOTIFICATION_TYPE = "index"
NOTIFICATION_VALUES = "values"
NOTIFICATION_DEVICE_CLASS = "device_class"
NOTIFICATION_SENSOR_ENABLED = "enabled"
NOTIFICATION_OFF_VALUE = "off_value"
NOTIFICATION_VALUE_CLEAR = 0
# Translation from values in Notification CC to binary sensors
# https://github.com/OpenZWave/open-zwave/blob/master/config/NotificationCCTypes.xml
NOTIFICATION_SENSORS = [
{
# Index 1: Smoke Alarm - Value Id's 1 and 2
# Assuming here that Value 1 and 2 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SMOKE_ALARM,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SMOKE,
},
{
# Index 1: Smoke Alarm - All other Value Id's
# Create as disabled sensors
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SMOKE_ALARM,
NOTIFICATION_VALUES: [3, 4, 5, 6, 7, 8],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SMOKE,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 2: Carbon Monoxide - Value Id's 1 and 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_MONOOXIDE,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_GAS,
},
{
# Index 2: Carbon Monoxide - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_MONOOXIDE,
NOTIFICATION_VALUES: [4, 5, 7],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_GAS,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 3: Carbon Dioxide - Value Id's 1 and 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_DIOXIDE,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_GAS,
},
{
# Index 3: Carbon Dioxide - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_DIOXIDE,
NOTIFICATION_VALUES: [4, 5, 7],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_GAS,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 4: Heat - Value Id's 1, 2, 5, 6 (heat/underheat)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HEAT,
NOTIFICATION_VALUES: [1, 2, 5, 6],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_HEAT,
},
{
# Index 4: Heat - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HEAT,
NOTIFICATION_VALUES: [3, 4, 8, 10, 11],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_HEAT,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 5: Water - Value Id's 1, 2, 3, 4
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_MOISTURE,
},
{
# Index 5: Water - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER,
NOTIFICATION_VALUES: [5],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_MOISTURE,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 6: Access Control - Value Id's 1, 2, 3, 4 (Lock)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_ACCESS_CONTROL,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_LOCK,
},
{
# Index 6: Access Control - Value Id 22 (door/window open)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_ACCESS_CONTROL,
NOTIFICATION_VALUES: [22],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_DOOR,
NOTIFICATION_OFF_VALUE: 23,
},
{
# Index 7: Home Security - Value Id's 1, 2 (intrusion)
# Assuming that value 1 and 2 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SAFETY,
},
{
# Index 7: Home Security - Value Id's 3, 4, 9 (tampering)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [3, 4, 9],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SAFETY,
},
{
# Index 7: Home Security - Value Id's 5, 6 (glass breakage)
# Assuming that value 5 and 6 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [5, 6],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SAFETY,
},
{
# Index 7: Home Security - Value Id's 7, 8 (motion)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [7, 8],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_MOTION,
},
{
# Index 8: Power management - Values 1...9
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_POWER_MANAGEMENT,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5, 6, 7, 8, 9],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_POWER,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 8: Power management - Values 10...15
# Battery values (mutually exclusive)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_POWER_MANAGEMENT,
NOTIFICATION_VALUES: [10, 11, 12, 13, 14, 15],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_POWER,
NOTIFICATION_SENSOR_ENABLED: False,
NOTIFICATION_OFF_VALUE: None,
},
{
# Index 9: System - Value Id's 1, 2, 6, 7
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SYSTEM,
NOTIFICATION_VALUES: [1, 2, 6, 7],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 10: Emergency - Value Id's 1, 2, 3
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_EMERGENCY,
NOTIFICATION_VALUES: [1, 2, 3],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
},
{
# Index 11: Clock - Value Id's 1, 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CLOCK,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: None,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 12: Appliance - All Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_APPLIANCE,
NOTIFICATION_VALUES: [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 13: Home Health - Value Id's 1,2,3,4,5
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_APPLIANCE,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 14: Siren
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SIREN,
NOTIFICATION_VALUES: [1],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_SOUND,
},
{
# Index 15: Water valve
# ignore non-boolean values
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER_VALVE,
NOTIFICATION_VALUES: [3, 4],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
},
{
# Index 16: Weather
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WEATHER,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
},
{
# Index 17: Irrigation
# ignore non-boolean values
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_IRRIGATION,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 18: Gas
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_GAS,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_GAS,
},
{
# Index 18: Gas
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_GAS,
NOTIFICATION_VALUES: [6],
NOTIFICATION_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
},
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave binary_sensor from config entry."""
@callback
def async_add_binary_sensor(values):
"""Add Z-Wave Binary Sensor(s)."""
async_add_entities(VALUE_TYPE_SENSORS[values.primary.type](values))
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(
hass, f"{DOMAIN}_new_{BINARY_SENSOR_DOMAIN}", async_add_binary_sensor
)
)
@callback
def async_get_legacy_binary_sensors(values):
"""Add Legacy/classic Z-Wave Binary Sensor."""
return [ZWaveBinarySensor(values)]
@callback
def async_get_notification_sensors(values):
"""Convert Notification values into binary sensors."""
sensors_to_add = []
for list_value in values.primary.value["List"]:
# check if we have a mapping for this value
for item in NOTIFICATION_SENSORS:
if item[NOTIFICATION_TYPE] != values.primary.index:
continue
if list_value["Value"] not in item[NOTIFICATION_VALUES]:
continue
sensors_to_add.append(
ZWaveListValueSensor(
# required values
values,
list_value["Value"],
item[NOTIFICATION_DEVICE_CLASS],
# optional values
item.get(NOTIFICATION_SENSOR_ENABLED, True),
item.get(NOTIFICATION_OFF_VALUE, NOTIFICATION_VALUE_CLEAR),
)
)
return sensors_to_add
VALUE_TYPE_SENSORS = {
ValueType.BOOL: async_get_legacy_binary_sensors,
ValueType.LIST: async_get_notification_sensors,
}
class ZWaveBinarySensor(ZWaveDeviceEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor."""
@property
def is_on(self):
"""Return if the sensor is on or off."""
return self.values.primary.value
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Legacy binary sensors are phased out (replaced by notification sensors)
# Disable by default to not confuse users
for item in self.values.primary.node.values():
if item.command_class == CommandClass.NOTIFICATION:
# This device properly implements the Notification CC, legacy sensor can be disabled
return False
return True
class ZWaveListValueSensor(ZWaveDeviceEntity, BinarySensorEntity):
"""Representation of a binary_sensor from values in the Z-Wave Notification CommandClass."""
def __init__(
self,
values,
on_value,
device_class=None,
default_enabled=True,
off_value=NOTIFICATION_VALUE_CLEAR,
):
"""Initialize a ZWaveListValueSensor entity."""
super().__init__(values)
self._on_value = on_value
self._device_class = device_class
self._default_enabled = default_enabled
self._off_value = off_value
# make sure the correct value is selected at startup
self._state = False
self.on_value_update()
@callback
def on_value_update(self):
"""Call when a value is added/updated in the underlying EntityValues Collection."""
if self.values.primary.value["Selected_id"] == self._on_value:
# Only when the active ID exactly matches our watched ON value, set sensor state to ON
self._state = True
elif self.values.primary.value["Selected_id"] == self._off_value:
# Only when the active ID exactly matches our watched OFF value, set sensor state to OFF
self._state = False
elif (
self._off_value is None
and self.values.primary.value["Selected_id"] != self._on_value
):
# Off value not explicitly specified
# Some values are reset by the simple fact they're overruled by another value coming in
# For example the battery charging values in Power Management Index
self._state = False
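        # Example using the Access Control mapping above: a sensor created with
        # on_value=22 and off_value=23 turns on when Selected_id becomes 22
        # (door/window open) and off again when it becomes 23.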
@property
def name(self):
"""Return the name of the entity."""
# Append value label to base name
base_name = super().name
value_label = ""
for item in self.values.primary.value["List"]:
if item["Value"] == self._on_value:
value_label = item["Label"]
break
# Strip "on location" / "at location" from name
# Note: We're assuming that we don't retrieve 2 values with different location
value_label = value_label.split(" on ")[0]
value_label = value_label.split(" at ")[0]
return f"{base_name}: {value_label}"
@property
def unique_id(self):
"""Return the unique_id of the entity."""
unique_id = super().unique_id
return f"{unique_id}.{self._on_value}"
@property
def is_on(self):
"""Return if the sensor is on or off."""
return self._state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._device_class
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# We hide the more advanced sensors by default to not overwhelm users
return self._default_enabled
|
import json
import logging
from homeassistant.components import mqtt
from homeassistant.const import DEGREE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
DOMAIN = "arwn"
DATA_ARWN = "arwn"
TOPIC = "arwn/#"
def discover_sensors(topic, payload):
"""Given a topic, dynamically create the right sensor type.
Async friendly.
"""
parts = topic.split("/")
unit = payload.get("units", "")
domain = parts[1]
if domain == "temperature":
name = parts[2]
if unit == "F":
unit = TEMP_FAHRENHEIT
else:
unit = TEMP_CELSIUS
return ArwnSensor(name, "temp", unit)
if domain == "moisture":
name = f"{parts[2]} Moisture"
return ArwnSensor(name, "moisture", unit, "mdi:water-percent")
if domain == "rain":
if len(parts) >= 3 and parts[2] == "today":
return ArwnSensor(
"Rain Since Midnight", "since_midnight", "in", "mdi:water"
)
return (
ArwnSensor("Total Rainfall", "total", unit, "mdi:water"),
ArwnSensor("Rainfall Rate", "rate", unit, "mdi:water"),
)
if domain == "barometer":
return ArwnSensor("Barometer", "pressure", unit, "mdi:thermometer-lines")
if domain == "wind":
return (
ArwnSensor("Wind Speed", "speed", unit, "mdi:speedometer"),
ArwnSensor("Wind Gust", "gust", unit, "mdi:speedometer"),
ArwnSensor("Wind Direction", "direction", DEGREE, "mdi:compass"),
)
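# Illustrative call (topic and payload values are hypothetical):
#   discover_sensors("arwn/temperature/outside", {"units": "F"})
#   -> ArwnSensor("outside", "temp", TEMP_FAHRENHEIT)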
def _slug(name):
return f"sensor.arwn_{slugify(name)}"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ARWN platform."""
@callback
def async_sensor_event_received(msg):
"""Process events as sensors.
When a new event on our topic (arwn/#) is received we map it
into a known kind of sensor based on topic name. If we've
never seen this before, we keep this sensor around in a global
cache. If we have seen it before, we update the values of the
existing sensor. Either way, we push an ha state update at the
end for the new event we've seen.
This lets us dynamically incorporate sensors without any
configuration on our side.
"""
event = json.loads(msg.payload)
sensors = discover_sensors(msg.topic, event)
if not sensors:
return
store = hass.data.get(DATA_ARWN)
if store is None:
store = hass.data[DATA_ARWN] = {}
if isinstance(sensors, ArwnSensor):
sensors = (sensors,)
if "timestamp" in event:
del event["timestamp"]
for sensor in sensors:
if sensor.name not in store:
sensor.hass = hass
sensor.set_event(event)
store[sensor.name] = sensor
_LOGGER.debug(
"Registering new sensor %(name)s => %(event)s",
{"name": sensor.name, "event": event},
)
async_add_entities((sensor,), True)
else:
store[sensor.name].set_event(event)
await mqtt.async_subscribe(hass, TOPIC, async_sensor_event_received, 0)
return True
class ArwnSensor(Entity):
"""Representation of an ARWN sensor."""
def __init__(self, name, state_key, units, icon=None):
"""Initialize the sensor."""
self.hass = None
self.entity_id = _slug(name)
self._name = name
self._state_key = state_key
self.event = {}
self._unit_of_measurement = units
self._icon = icon
def set_event(self, event):
"""Update the sensor with the most recent event."""
self.event = {}
self.event.update(event)
self.async_write_ha_state()
@property
def state(self):
"""Return the state of the device."""
return self.event.get(self._state_key, None)
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def state_attributes(self):
"""Return all the state attributes."""
return self.event
@property
def unit_of_measurement(self):
"""Return the unit of measurement the state is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def icon(self):
"""Return the icon of device based on its type."""
return self._icon
|
import argparse
from src.training_data import download_and_serialize
def create_parser():
"""Create the argparse parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--tile-size",
default=64,
type=int,
help="tile the NAIP and training data into NxN tiles with this dimension")
parser.add_argument("--tile-overlap",
default=1,
type=int,
help="divide the tile-size by this arg for how many pixels to move over "
"when tiling data. this is set to 1 by default, so tiles don't "
"overlap. setting it to 2 would make tiles overlap by half, and "
"setting it to 3 would make the tiles overlap by 2/3rds")
parser.add_argument("--pixels-to-fatten-roads",
default=3,
type=int,
help="the number of px to fatten a road centerline (e.g. the default 3 "
"makes roads 7px wide)")
parser.add_argument("--percent-for-training-data",
default=.90,
type=float,
help="how much data to allocate for training. the remainder is left for "
"test")
parser.add_argument("--bands",
default=[1, 1, 1, 1],
nargs=4,
type=int,
help="specify which bands to activate (R G B IR)"
"--bands 1 1 1 1 (which activates only all bands)")
parser.add_argument(
"--label-data-files",
nargs='+',
default=[
'http://download.geofabrik.de/north-america/us/delaware-latest.osm.pbf',
],
type=str,
help="PBF files to extract road/feature label info from")
parser.add_argument("--naip-path",
default=['de', '2013'],
nargs=2,
type=str,
help="specify the state and year for the NAIPs to analyze"
"--naip-path de 2013 (defaults to some Delaware data)")
parser.add_argument("--randomize-naips",
default=False,
                        action='store_true',
help="turn on this arg if you don't want to get NAIPs in order from the "
"bucket path")
parser.add_argument("--number-of-naips",
default=6,
type=int,
help="the number of naip images to analyze, 30+ sq. km each")
parser.add_argument("--extract-type",
default='highway',
choices=['highway', 'tennis', 'footway', 'cycleway'],
help="the type of feature to identify")
parser.add_argument("--save-clippings",
action='store_true',
help="save the training data tiles to /data/naip")
return parser
def main():
"""Download and serialize training data."""
args = create_parser().parse_args()
naip_state, naip_year = args.naip_path
download_and_serialize(args.number_of_naips,
args.randomize_naips,
naip_state,
naip_year,
args.extract_type,
args.bands,
args.tile_size,
args.pixels_to_fatten_roads,
args.label_data_files,
args.tile_overlap)
if __name__ == "__main__":
main()
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import alembic_autogenerate_enums
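# Imported for its side effect: it hooks into Alembic's autogenerate comparison so
# that changes to PostgreSQL enum types are also detected (no direct use below).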
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
db_url_escaped = current_app.config.get('SQLALCHEMY_DATABASE_URI').replace('%', '%%')
config.set_main_option(
"sqlalchemy.url", db_url_escaped
)
target_metadata = current_app.extensions["migrate"].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
**current_app.extensions["migrate"].configure_args,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
import os.path
from datetime import timedelta
from io import StringIO
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from weblate.auth.models import User
from weblate.billing.models import Billing, Invoice, Plan
from weblate.billing.tasks import (
billing_alert,
billing_check,
notify_expired,
perform_removal,
schedule_removal,
)
from weblate.trans.models import Project
from weblate.trans.tests.test_models import RepoTestCase
from weblate.trans.tests.utils import create_test_billing
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test-data")
class BillingTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="bill", password="kill", email="[email protected]"
)
self.billing = create_test_billing(self.user, invoice=False)
self.plan = self.billing.plan
self.invoice = Invoice.objects.create(
billing=self.billing,
start=timezone.now().date() - timedelta(days=2),
end=timezone.now().date() + timedelta(days=2),
amount=10,
ref="00000",
)
self.projectnum = 0
def refresh_from_db(self):
self.billing = Billing.objects.get(pk=self.billing.pk)
def add_project(self):
name = f"test{self.projectnum}"
self.projectnum += 1
project = Project.objects.create(
name=name, slug=name, access_control=Project.ACCESS_PROTECTED
)
self.billing.projects.add(project)
project.add_user(self.user, "@Billing")
def test_view_billing(self):
self.add_project()
# Not authenticated
response = self.client.get(reverse("billing"))
self.assertEqual(302, response.status_code)
# Random user
User.objects.create_user("foo", "[email protected]", "bar")
self.client.login(username="foo", password="bar")
response = self.client.get(reverse("billing"))
self.assertNotContains(response, "Current plan")
# Owner
self.client.login(username="bill", password="kill")
response = self.client.get(reverse("billing"), follow=True)
self.assertRedirects(response, self.billing.get_absolute_url())
self.assertContains(response, "Current plan")
# Admin
self.user.is_superuser = True
self.user.save()
response = self.client.get(reverse("billing"))
self.assertContains(response, "Owners")
def test_limit_projects(self):
self.assertTrue(self.billing.in_limits)
self.add_project()
self.refresh_from_db()
self.assertTrue(self.billing.in_limits)
self.add_project()
self.refresh_from_db()
self.assertFalse(self.billing.in_limits)
def test_commands(self):
out = StringIO()
call_command("billing_check", stdout=out)
self.assertEqual(out.getvalue(), "")
self.add_project()
self.add_project()
out = StringIO()
call_command("billing_check", stdout=out)
self.assertEqual(
out.getvalue(),
"Following billings are over limit:\n" " * test0, test1 (Basic plan)\n",
)
out = StringIO()
call_command("billing_check", "--valid", stdout=out)
self.assertEqual(out.getvalue(), "")
self.invoice.delete()
out = StringIO()
call_command("billing_check", stdout=out)
self.assertEqual(
out.getvalue(),
"Following billings are over limit:\n"
" * test0, test1 (Basic plan)\n"
"Following billings are past due date:\n"
" * test0, test1 (Basic plan)\n",
)
call_command("billing_check", "--notify", stdout=out)
self.assertEqual(len(mail.outbox), 1)
def test_invoice_validation(self):
invoice = Invoice(
billing=self.billing,
start=self.invoice.start,
end=self.invoice.end,
amount=30,
)
# Full overlap
with self.assertRaises(ValidationError):
invoice.clean()
# Start overlap
invoice.start = self.invoice.end + timedelta(days=1)
with self.assertRaises(ValidationError):
invoice.clean()
# Zero interval
invoice.end = self.invoice.end + timedelta(days=1)
with self.assertRaises(ValidationError):
invoice.clean()
# Valid after existing
invoice.end = self.invoice.end + timedelta(days=2)
invoice.clean()
# End overlap
invoice.start = self.invoice.start - timedelta(days=4)
invoice.end = self.invoice.end
with self.assertRaises(ValidationError):
invoice.clean()
# Valid before existing
invoice.end = self.invoice.start - timedelta(days=1)
invoice.clean()
# Validation of existing
self.invoice.clean()
@override_settings(INVOICE_PATH=TEST_DATA)
def test_download(self):
self.add_project()
# Unauthenticated
response = self.client.get(
reverse("invoice-download", kwargs={"pk": self.invoice.pk})
)
self.assertEqual(302, response.status_code)
# Not owner
User.objects.create_user("foo", "[email protected]", "bar")
self.client.login(username="foo", password="bar")
response = self.client.get(
reverse("invoice-download", kwargs={"pk": self.invoice.pk})
)
self.assertEqual(403, response.status_code)
# Owner
self.client.login(username="bill", password="kill")
response = self.client.get(
reverse("invoice-download", kwargs={"pk": self.invoice.pk})
)
self.assertContains(response, "PDF-INVOICE")
# Invoice without file
invoice = Invoice.objects.create(
billing=self.billing,
start=timezone.now().date() - timedelta(days=2),
end=timezone.now().date() + timedelta(days=2),
amount=10,
)
response = self.client.get(
reverse("invoice-download", kwargs={"pk": invoice.pk})
)
self.assertEqual(404, response.status_code)
# Invoice with non existing file
invoice.ref = "NON"
invoice.save()
response = self.client.get(
reverse("invoice-download", kwargs={"pk": invoice.pk})
)
self.assertEqual(404, response.status_code)
@override_settings(EMAIL_SUBJECT_PREFIX="")
def test_expiry(self):
self.add_project()
# Paid
schedule_removal()
notify_expired()
perform_removal()
billing_alert()
self.assertEqual(len(mail.outbox), 0)
self.refresh_from_db()
self.assertIsNone(self.billing.removal)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.state, Billing.STATE_ACTIVE)
self.assertEqual(self.billing.projects.count(), 1)
# Not paid
self.invoice.start -= timedelta(days=14)
self.invoice.end -= timedelta(days=14)
self.invoice.save()
schedule_removal()
notify_expired()
perform_removal()
billing_alert()
self.assertEqual(len(mail.outbox), 1)
self.refresh_from_db()
self.assertIsNone(self.billing.removal)
self.assertEqual(self.billing.state, Billing.STATE_ACTIVE)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertEqual(mail.outbox.pop().subject, "Your billing plan has expired")
# Not paid for long
self.invoice.start -= timedelta(days=30)
self.invoice.end -= timedelta(days=30)
self.invoice.save()
schedule_removal()
notify_expired()
perform_removal()
billing_alert()
self.assertEqual(len(mail.outbox), 1)
self.refresh_from_db()
self.assertIsNotNone(self.billing.removal)
self.assertEqual(self.billing.state, Billing.STATE_ACTIVE)
self.assertFalse(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertEqual(
mail.outbox.pop().subject,
"Your translation project is scheduled for removal",
)
# Final removal
self.billing.removal = timezone.now() - timedelta(days=30)
self.billing.save(skip_limits=True)
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TERMINATED)
self.assertFalse(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 0)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox.pop().subject, "Your translation project was removed"
)
@override_settings(EMAIL_SUBJECT_PREFIX="")
def test_trial(self):
self.billing.state = Billing.STATE_TRIAL
self.billing.save(skip_limits=True)
self.billing.invoice_set.all().delete()
self.add_project()
# No expiry set
billing_check()
notify_expired()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TRIAL)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertIsNone(self.billing.removal)
self.assertEqual(len(mail.outbox), 0)
# Future expiry
self.billing.expiry = timezone.now() + timedelta(days=30)
self.billing.save(skip_limits=True)
billing_check()
notify_expired()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TRIAL)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertIsNone(self.billing.removal)
self.assertEqual(len(mail.outbox), 0)
# Close expiry
self.billing.expiry = timezone.now() + timedelta(days=1)
self.billing.save(skip_limits=True)
billing_check()
notify_expired()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TRIAL)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertIsNone(self.billing.removal)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox.pop().subject, "Your trial period is about to expire"
)
# Past expiry
self.billing.expiry = timezone.now() - timedelta(days=1)
self.billing.save(skip_limits=True)
billing_check()
notify_expired()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TRIAL)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertIsNone(self.billing.expiry)
self.assertIsNotNone(self.billing.removal)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox.pop().subject,
"Your translation project is scheduled for removal",
)
        # There should be a notification sent when removal is scheduled
billing_check()
notify_expired()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TRIAL)
self.assertTrue(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 1)
self.assertIsNotNone(self.billing.removal)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox.pop().subject,
"Your translation project is scheduled for removal",
)
# Removal
self.billing.removal = timezone.now() - timedelta(days=1)
self.billing.save(skip_limits=True)
billing_check()
perform_removal()
self.refresh_from_db()
self.assertEqual(self.billing.state, Billing.STATE_TERMINATED)
self.assertFalse(self.billing.paid)
self.assertEqual(self.billing.projects.count(), 0)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox.pop().subject, "Your translation project was removed"
)
def test_free_trial(self):
self.plan.price = 0
self.plan.yearly_price = 0
self.plan.save()
self.test_trial()
class HostingTest(RepoTestCase):
def get_user(self):
user = User.objects.create_user(
username="testuser", password="testpassword", full_name="Test User"
)
user.full_name = "First Second"
user.email = "[email protected]"
user.save()
return user
@override_settings(
OFFER_HOSTING=True,
ADMINS_HOSTING=["[email protected]"],
)
def test_hosting(self):
"""Test for hosting form with enabled hosting."""
Plan.objects.create(price=0, slug="libre", name="Libre")
user = self.get_user()
self.client.login(username="testuser", password="testpassword")
response = self.client.get(reverse("hosting"))
self.assertContains(response, "trial")
# Creating a trial
response = self.client.post(reverse("trial"), {"plan": "libre"}, follow=True)
self.assertContains(response, "Create project")
# Flush outbox
mail.outbox = []
# Add component to a trial
component = self.create_component()
billing = user.billing_set.get()
billing.projects.add(component.project)
# Not valid for libre
self.assertFalse(billing.valid_libre)
response = self.client.post(
billing.get_absolute_url(),
{"request": "1", "message": "msg"},
follow=True,
)
self.assertNotContains(response, "Pending approval")
# Add missing license info
component.license = "GPL-3.0-or-later"
component.save()
billing = user.billing_set.get()
# Valid for libre
self.assertTrue(billing.valid_libre)
response = self.client.post(
billing.get_absolute_url(),
{"request": "1", "message": "msg"},
follow=True,
)
self.assertContains(response, "Pending approval")
# Verify message
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject, "[Weblate] Hosting request for Test (Libre, trial)"
)
self.assertIn("testuser", mail.outbox[0].body)
self.assertEqual(mail.outbox[0].to, ["[email protected]"])
# Non-admin approval
response = self.client.post(
billing.get_absolute_url(),
{"approve": "1"},
follow=True,
)
self.assertContains(response, "Pending approval")
# Admin extension
user.is_superuser = True
user.save()
response = self.client.post(
billing.get_absolute_url(),
{"extend": "1"},
follow=True,
)
self.assertContains(response, "Pending approval")
# Admin approval
user.is_superuser = True
user.save()
response = self.client.post(
billing.get_absolute_url(),
{"approve": "1"},
follow=True,
)
self.assertNotContains(response, "Pending approval")
|
import sys
import uuid
from .utils import LOGGER
class ParsingError(Exception):
"""Used for forwarding parsing error messages to apply_shortcodes."""
pass
def _format_position(data, pos):
"""Return position formatted as line/column.
This is used for prettier error messages.
"""
line = 0
col = 0
llb = '' # last line break
for c in data[:pos]:
if c == '\r' or c == '\n':
if llb and c != llb:
llb = ''
else:
line += 1
col = 0
llb = c
else:
col += 1
llb = ''
return "line {0}, column {1}".format(line + 1, col + 1)
def _skip_whitespace(data, pos, must_be_nontrivial=False):
"""Return first position after whitespace.
If must_be_nontrivial is set to True, raises ParsingError
if no whitespace is found.
"""
if must_be_nontrivial:
if pos == len(data) or not data[pos].isspace():
raise ParsingError("Expecting whitespace at {0}!".format(_format_position(data, pos)))
while pos < len(data):
if not data[pos].isspace():
break
pos += 1
return pos
def _skip_nonwhitespace(data, pos):
"""Return first position not before pos which contains a non-whitespace character."""
for i, x in enumerate(data[pos:]):
if x.isspace():
return pos + i
return len(data)
def _parse_quoted_string(data, start):
"""Parse a quoted string starting at position start in data.
Returns the position after the string followed by the string itself.
"""
value = ''
qc = data[start]
pos = start + 1
while pos < len(data):
char = data[pos]
if char == '\\':
if pos + 1 < len(data):
value += data[pos + 1]
pos += 2
else:
raise ParsingError("Unexpected end of data while escaping ({0})".format(_format_position(data, pos)))
elif (char == "'" or char == '"') and char == qc:
return pos + 1, value
else:
value += char
pos += 1
raise ParsingError("Unexpected end of unquoted string (started at {0})!".format(_format_position(data, start)))
def _parse_unquoted_string(data, start, stop_at_equals):
"""Parse an unquoted string starting at position start in data.
Returns the position after the string followed by the string itself.
In case stop_at_equals is set to True, an equal sign will terminate
the string.
"""
value = ''
pos = start
while pos < len(data):
char = data[pos]
if char == '\\':
if pos + 1 < len(data):
value += data[pos + 1]
pos += 2
else:
raise ParsingError("Unexpected end of data while escaping ({0})".format(_format_position(data, pos)))
elif char.isspace():
break
elif char == '=' and stop_at_equals:
break
elif char == "'" or char == '"':
raise ParsingError("Unexpected quotation mark in unquoted string ({0})".format(_format_position(data, pos)))
else:
value += char
pos += 1
return pos, value
def _parse_string(data, start, stop_at_equals=False, must_have_content=False):
"""Parse a string starting at position start in data.
Returns the position after the string, followed by the string itself, and
    followed by a flag indicating whether the following character is an equals
sign (only set if stop_at_equals is True).
If must_have_content is set to True, no empty unquoted strings are accepted.
"""
if start == len(data):
raise ParsingError("Expecting string, but found end of input!")
char = data[start]
if char == '"' or char == "'":
end, value = _parse_quoted_string(data, start)
has_content = True
else:
end, value = _parse_unquoted_string(data, start, stop_at_equals)
has_content = len(value) > 0
if must_have_content and not has_content:
raise ParsingError("String starting at {0} must be non-empty!".format(_format_position(data, start)))
next_is_equals = False
if stop_at_equals and end + 1 < len(data):
next_is_equals = (data[end] == '=')
return end, value, next_is_equals
def _parse_shortcode_args(data, start, shortcode_name, start_pos):
"""When pointed to after a shortcode's name in a shortcode tag, parses the shortcode's arguments until '%}}'.
Returns the position after '%}}', followed by a tuple (args, kw).
name and start_pos are only used for formatting error messages.
"""
args = []
kwargs = {}
pos = start
while True:
# Skip whitespaces
try:
pos = _skip_whitespace(data, pos, must_be_nontrivial=True)
except ParsingError:
if not args and not kwargs:
raise ParsingError("Shortcode '{0}' starting at {1} is not terminated correctly with '%}}}}'!".format(shortcode_name, _format_position(data, start_pos)))
else:
raise ParsingError("Syntax error in shortcode '{0}' at {1}: expecting whitespace!".format(shortcode_name, _format_position(data, pos)))
if pos == len(data):
break
# Check for end of shortcode
if pos + 3 <= len(data) and data[pos:pos + 3] == '%}}':
return pos + 3, (args, kwargs)
# Read name
pos, name, next_is_equals = _parse_string(data, pos, stop_at_equals=True, must_have_content=True)
if next_is_equals:
# Read value
pos, value, _ = _parse_string(data, pos + 1, stop_at_equals=False, must_have_content=False)
# Store keyword argument
kwargs[name] = value
else:
# Store positional argument
args.append(name)
raise ParsingError("Shortcode '{0}' starting at {1} is not terminated correctly with '%}}}}'!".format(shortcode_name, _format_position(data, start_pos)))
def _new_sc_id():
return str('SHORTCODE{0}REPLACEMENT'.format(str(uuid.uuid4()).replace('-', '')))
def extract_shortcodes(data):
"""
    Return a tuple (data, shortcodes).
    data is the original text with each shortcode replaced by a UUID-based placeholder.
    shortcodes is a dictionary whose keys are those placeholders and whose values
    are the shortcodes themselves, ready to process.
"""
shortcodes = {}
splitted = _split_shortcodes(data)
if not data: # Empty
return '', {}
def extract_data_chunk(data):
"""Take a list of splitted shortcodes and return a string and a tail.
The string is data, the tail is ready for a new run of this same function.
"""
text = []
for i, token in enumerate(data):
if token[0] == 'SHORTCODE_START':
name = token[3]
sc_id = _new_sc_id()
text.append(sc_id)
# See if this shortcode closes
for j in range(i, len(data)):
if data[j][0] == 'SHORTCODE_END' and data[j][3] == name:
# Extract this chunk
shortcodes[sc_id] = ''.join(t[1] for t in data[i:j + 1])
return ''.join(text), data[j + 1:]
# Doesn't close
shortcodes[sc_id] = token[1]
return ''.join(text), data[i + 1:]
elif token[0] == 'TEXT':
text.append(token[1])
return ''.join(text), data[1:]
elif token[0] == 'SHORTCODE_END': # This is malformed
raise Exception('Closing unopened shortcode {}'.format(token[3]))
text = []
tail = splitted
while True:
new_text, tail = extract_data_chunk(tail)
text.append(new_text)
if not tail:
break
return ''.join(text), shortcodes
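# Illustrative behaviour (the placeholder is a generated token of the form
# SHORTCODE<uuid>REPLACEMENT, so the exact value differs per call):
#   text, scs = extract_shortcodes('Hello {{% foo %}}bar{{% /foo %}}!')
#   # text -> 'Hello SHORTCODE...REPLACEMENT!'
#   # scs  -> {'SHORTCODE...REPLACEMENT': '{{% foo %}}bar{{% /foo %}}'}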
def _split_shortcodes(data):
"""Given input data, splits it into a sequence of texts, shortcode starts and shortcode ends.
Returns a list of tuples of the following forms:
1. ("TEXT", text)
2. ("SHORTCODE_START", text, start, name, args)
3. ("SHORTCODE_END", text, start, name)
Here, text is the raw text represented by the token; start is the starting position in data
of the token; name is the name of the shortcode; and args is a tuple (args, kw) as returned
by _parse_shortcode_args.
"""
pos = 0
result = []
while pos < len(data):
# Search for shortcode start
start = data.find('{{%', pos)
if start < 0:
result.append(("TEXT", data[pos:]))
break
result.append(("TEXT", data[pos:start]))
# Extract name
name_start = _skip_whitespace(data, start + 3)
name_end = _skip_nonwhitespace(data, name_start)
name = data[name_start:name_end]
if not name:
raise ParsingError("Syntax error: '{{{{%' must be followed by shortcode name ({0})!".format(_format_position(data, start)))
# Finish shortcode
if name[0] == '/':
# This is a closing shortcode
name = name[1:]
end_start = _skip_whitespace(data, name_end) # start of '%}}'
pos = end_start + 3
# Must be followed by '%}}'
if pos > len(data) or data[end_start:pos] != '%}}':
raise ParsingError("Syntax error: '{{{{% /{0}' must be followed by ' %}}}}' ({1})!".format(name, _format_position(data, end_start)))
result.append(("SHORTCODE_END", data[start:pos], start, name))
elif name == '%}}':
raise ParsingError("Syntax error: '{{{{%' must be followed by shortcode name ({0})!".format(_format_position(data, start)))
else:
# This is an opening shortcode
pos, args = _parse_shortcode_args(data, name_end, shortcode_name=name, start_pos=start)
result.append(("SHORTCODE_START", data[start:pos], start, name, args))
return result
def apply_shortcodes(data, registry, site=None, filename=None, raise_exceptions=False, lang=None, extra_context=None):
"""Apply Hugo-style shortcodes on data.
{{% name parameters %}} will end up calling the registered "name" function with the given parameters.
{{% name parameters %}} something {{% /name %}} will call name with the parameters and
one extra "data" parameter containing " something ".
If raise_exceptions is set to True, instead of printing error messages and terminating, errors are
passed on as exceptions to the caller.
The site parameter is passed with the same name to the shortcodes so they can access Nikola state.
>>> print(apply_shortcodes('==> {{% foo bar=baz %}} <==', {'foo': lambda *a, **k: k['bar']}))
==> baz <==
>>> print(apply_shortcodes('==> {{% foo bar=baz %}}some data{{% /foo %}} <==', {'foo': lambda *a, **k: k['bar']+k['data']}))
==> bazsome data <==
"""
if extra_context is None:
extra_context = {}
empty_string = ''
try:
# Split input data into text, shortcodes and shortcode endings
sc_data = _split_shortcodes(data)
# Now process data
result = []
dependencies = []
pos = 0
while pos < len(sc_data):
current = sc_data[pos]
if current[0] == "TEXT":
result.append(current[1])
pos += 1
elif current[0] == "SHORTCODE_END":
raise ParsingError("Found shortcode ending '{{{{% /{0} %}}}}' which isn't closing a started shortcode ({1})!".format(current[3], _format_position(data, current[2])))
elif current[0] == "SHORTCODE_START":
name = current[3]
# Check if we can find corresponding ending
found = None
for p in range(pos + 1, len(sc_data)):
if sc_data[p][0] == "SHORTCODE_END" and sc_data[p][3] == name:
found = p
break
if found:
# Found ending. Extract data argument:
data_arg = []
for p in range(pos + 1, found):
data_arg.append(sc_data[p][1])
data_arg = empty_string.join(data_arg)
pos = found + 1
else:
# Single shortcode
pos += 1
data_arg = ''
args, kw = current[4]
kw['site'] = site
kw['data'] = data_arg
kw['lang'] = lang
kw.update(extra_context)
if name in registry:
f = registry[name]
if getattr(f, 'nikola_shortcode_pass_filename', None):
kw['filename'] = filename
res = f(*args, **kw)
                    if not isinstance(res, tuple): # For backwards compatibility
res = (res, [])
else:
LOGGER.error('Unknown shortcode %s (started at %s)', name, _format_position(data, current[2]))
res = ('', [])
result.append(res[0])
dependencies += res[1]
return empty_string.join(result), dependencies
except ParsingError as e:
if raise_exceptions:
# Throw up
raise
if filename:
LOGGER.error("Shortcode error in file {0}: {1}".format(filename, e))
else:
LOGGER.error("Shortcode error: {0}".format(e))
sys.exit(1)
|
import genmsg
import genpy.message # for wrapping get_message_class, get_service_class
# forward a bunch of old symbols from genpy for backwards compat
from genpy import DeserializationError # noqa: F401
from genpy import Duration # noqa: F401
from genpy import Message # noqa: F401
from genpy import SerializationError # noqa: F401
from genpy import TVal # noqa: F401
from genpy import Time # noqa: F401
from genpy.message import check_type # noqa: F401
from genpy.message import fill_message_args # noqa: F401
from genpy.message import get_printable_message_args # noqa: F401
from genpy.message import strify_message # noqa: F401
import roslib
import rospkg
def _get_message_or_service_class(type_str, message_type, reload_on_error=False):
# parse package and local type name for import
package, base_type = genmsg.package_resource_name(message_type)
if not package:
if base_type == 'Header':
package = 'std_msgs'
else:
raise ValueError('message type is missing package name: %s' % str(message_type))
pypkg = val = None
try:
# bootstrap our sys.path
roslib.launcher.load_manifest(package)
# import the package and return the class
pypkg = __import__('%s.%s' % (package, type_str))
val = getattr(getattr(pypkg, type_str), base_type)
except rospkg.ResourceNotFound:
val = None
except ImportError:
val = None
except AttributeError:
val = None
# this logic is mainly to support rosh, so that a user doesn't
# have to exit a shell just because a message wasn't built yet
if val is None and reload_on_error:
try:
reload # Python 2
except NameError:
from importlib import reload # Python 3
try:
if pypkg:
reload(pypkg)
val = getattr(getattr(pypkg, type_str), base_type)
except Exception:
val = None
return val
# cache for get_message_class
_message_class_cache = {}
# cache for get_service_class
_service_class_cache = {}
def get_message_class(message_type, reload_on_error=False):
if message_type in _message_class_cache:
return _message_class_cache[message_type]
# try w/o bootstrapping
cls = genpy.message.get_message_class(message_type, reload_on_error=reload_on_error)
if cls is None:
# try old loader w/ bootstrapping
cls = _get_message_or_service_class('msg', message_type, reload_on_error=reload_on_error)
if cls:
_message_class_cache[message_type] = cls
return cls
def get_service_class(service_type, reload_on_error=False):
if service_type in _service_class_cache:
return _service_class_cache[service_type]
    # try w/o bootstrapping
    cls = genpy.message.get_service_class(service_type, reload_on_error=reload_on_error)
if cls is None:
# try old loader w/ bootstrapping
cls = _get_message_or_service_class('srv', service_type, reload_on_error=reload_on_error)
if cls:
_service_class_cache[service_type] = cls
return cls
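# Illustrative usage (assumes the referenced packages are built and on the ROS
# package path; both helpers return None when the class cannot be located):
#   cls = get_message_class('std_msgs/String')   # -> std_msgs.msg.String
#   srv = get_service_class('std_srvs/Empty')    # -> std_srvs.srv.Empty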
|
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep * 1000 # convert to milliseconds
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 ICO source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately, but here since all estimates are on
# 'sample' we can use one morph matrix for all the heavy lifting.
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3])  # now shaped subjects x time x space x conditions
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language).
# As an aside, note that in this particular example, we cannot use the A*B
# notation, which returns both the main and the interaction effects. The reason
# is that the clustering function expects ``stat_fun`` to return a 1-D array.
# To get clusters for both, you must create a loop.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A ``stat_fun`` must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as a flattened
# array, necessitated by the clustering procedure. The ANOVA, however, expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# .. note:: For further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut-timefreq-twoway-anova>`.
def stat_fun(*args):
# get f-values only.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal).
# As we only have one hemisphere, we only need half the adjacency.
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='temporal extent (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses:
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
|
import logging
import pysdcp
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Sony Projector"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Connect to Sony projector using network."""
host = config[CONF_HOST]
name = config[CONF_NAME]
sdcp_connection = pysdcp.Projector(host)
# Sanity check the connection
try:
sdcp_connection.get_power()
except ConnectionError:
_LOGGER.error("Failed to connect to projector '%s'", host)
return False
_LOGGER.debug("Validated projector '%s' OK", host)
add_entities([SonyProjector(sdcp_connection, name)], True)
return True
class SonyProjector(SwitchEntity):
"""Represents a Sony Projector as a switch."""
def __init__(self, sdcp_connection, name):
"""Init of the Sony projector."""
self._sdcp = sdcp_connection
self._name = name
self._state = None
self._available = False
self._attributes = {}
@property
def available(self):
"""Return if projector is available."""
return self._available
@property
def name(self):
"""Return name of the projector."""
return self._name
@property
def is_on(self):
"""Return if the projector is turned on."""
return self._state
@property
def state_attributes(self):
"""Return state attributes."""
return self._attributes
def update(self):
"""Get the latest state from the projector."""
try:
self._state = self._sdcp.get_power()
self._available = True
except ConnectionRefusedError:
_LOGGER.error("Projector connection refused")
self._available = False
def turn_on(self, **kwargs):
"""Turn the projector on."""
_LOGGER.debug("Powering on projector '%s'", self.name)
if self._sdcp.set_power(True):
_LOGGER.debug("Powered on successfully")
self._state = STATE_ON
else:
_LOGGER.error("Power on command was not successful")
def turn_off(self, **kwargs):
"""Turn the projector off."""
_LOGGER.debug("Powering off projector '%s'", self.name)
if self._sdcp.set_power(False):
_LOGGER.debug("Powered off successfully")
self._state = STATE_OFF
else:
_LOGGER.error("Power off command was not successful")
|
import logging
from hyperion import client, const
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
CONF_DEFAULT_COLOR = "default_color"
CONF_PRIORITY = "priority"
CONF_HDMI_PRIORITY = "hdmi_priority"
CONF_EFFECT_LIST = "effect_list"
# As we want to preserve brightness control for effects (e.g. to reduce the
# brightness for V4L), we need to persist the effect that is in flight, so
# subsequent calls to turn_on will know to keep the effect enabled.
# Unfortunately the Home Assistant UI does not easily expose a way to remove a
# selected effect (there is no 'No Effect' option by default). Instead, we
# create a new fake effect ("Solid") that is always selected by default for
# showing a solid color. This is the same method used by WLED.
KEY_EFFECT_SOLID = "Solid"
DEFAULT_COLOR = [255, 255, 255]
DEFAULT_BRIGHTNESS = 255
DEFAULT_EFFECT = KEY_EFFECT_SOLID
DEFAULT_NAME = "Hyperion"
DEFAULT_ORIGIN = "Home Assistant"
DEFAULT_PORT = 19444
DEFAULT_PRIORITY = 128
DEFAULT_HDMI_PRIORITY = 880
DEFAULT_EFFECT_LIST = []
SUPPORT_HYPERION = SUPPORT_COLOR | SUPPORT_BRIGHTNESS | SUPPORT_EFFECT
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HDMI_PRIORITY, invalidation_version="0.118"),
cv.deprecated(CONF_DEFAULT_COLOR, invalidation_version="0.118"),
cv.deprecated(CONF_EFFECT_LIST, invalidation_version="0.118"),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEFAULT_COLOR, default=DEFAULT_COLOR): vol.All(
list,
vol.Length(min=3, max=3),
[vol.All(vol.Coerce(int), vol.Range(min=0, max=255))],
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PRIORITY, default=DEFAULT_PRIORITY): cv.positive_int,
vol.Optional(
CONF_HDMI_PRIORITY, default=DEFAULT_HDMI_PRIORITY
): cv.positive_int,
vol.Optional(CONF_EFFECT_LIST, default=DEFAULT_EFFECT_LIST): vol.All(
cv.ensure_list, [cv.string]
),
}
),
)
ICON_LIGHTBULB = "mdi:lightbulb"
ICON_EFFECT = "mdi:lava-lamp"
ICON_EXTERNAL_SOURCE = "mdi:television-ambient-light"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Hyperion server remote."""
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
priority = config[CONF_PRIORITY]
hyperion_client = client.HyperionClient(host, port)
if not await hyperion_client.async_client_connect():
raise PlatformNotReady
async_add_entities([Hyperion(name, priority, hyperion_client)])
class Hyperion(LightEntity):
"""Representation of a Hyperion remote."""
def __init__(self, name, priority, hyperion_client):
"""Initialize the light."""
self._name = name
self._priority = priority
self._client = hyperion_client
# Active state representing the Hyperion instance.
self._set_internal_state(
brightness=255, rgb_color=DEFAULT_COLOR, effect=KEY_EFFECT_SOLID
)
self._effect_list = []
@property
def should_poll(self):
"""Return whether or not this entity should be polled."""
return False
@property
def name(self):
"""Return the name of the light."""
return self._name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self):
"""Return last color value set."""
return color_util.color_RGB_to_hs(*self._rgb_color)
@property
def is_on(self):
"""Return true if not black."""
return self._client.is_on()
@property
def icon(self):
"""Return state specific icon."""
return self._icon
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return (
self._effect_list
+ const.KEY_COMPONENTID_EXTERNAL_SOURCES
+ [KEY_EFFECT_SOLID]
)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_HYPERION
@property
def available(self):
"""Return server availability."""
return self._client.has_loaded_state
@property
def unique_id(self):
"""Return a unique id for this instance."""
return self._client.id
async def async_turn_on(self, **kwargs):
"""Turn the lights on."""
# == Turn device on ==
# Turn on both ALL (Hyperion itself) and LEDDEVICE. It would be
# preferable to enable LEDDEVICE after the settings (e.g. brightness,
# color, effect), but this is not possible due to:
# https://github.com/hyperion-project/hyperion.ng/issues/967
if not self.is_on:
if not await self._client.async_send_set_component(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_ALL,
const.KEY_STATE: True,
}
}
):
return
if not await self._client.async_send_set_component(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_LEDDEVICE,
const.KEY_STATE: True,
}
}
):
return
# == Get key parameters ==
brightness = kwargs.get(ATTR_BRIGHTNESS, self._brightness)
effect = kwargs.get(ATTR_EFFECT, self._effect)
if ATTR_HS_COLOR in kwargs:
rgb_color = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
else:
rgb_color = self._rgb_color
# == Set brightness ==
if self._brightness != brightness:
if not await self._client.async_send_set_adjustment(
**{
const.KEY_ADJUSTMENT: {
const.KEY_BRIGHTNESS: int(
round((float(brightness) * 100) / 255)
)
}
}
):
return
# == Set an external source
if effect and effect in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
# Clear any color/effect.
if not await self._client.async_send_clear(
**{const.KEY_PRIORITY: self._priority}
):
return
# Turn off all external sources, except the intended.
for key in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
if not await self._client.async_send_set_component(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: key,
const.KEY_STATE: effect == key,
}
}
):
return
# == Set an effect
elif effect and effect != KEY_EFFECT_SOLID:
# This call should not be necessary, but without it there is no priorities-update issued:
# https://github.com/hyperion-project/hyperion.ng/issues/992
if not await self._client.async_send_clear(
**{const.KEY_PRIORITY: self._priority}
):
return
if not await self._client.async_send_set_effect(
**{
const.KEY_PRIORITY: self._priority,
const.KEY_EFFECT: {const.KEY_NAME: effect},
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
):
return
# == Set a color
else:
if not await self._client.async_send_set_color(
**{
const.KEY_PRIORITY: self._priority,
const.KEY_COLOR: rgb_color,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
):
return
async def async_turn_off(self, **kwargs):
"""Disable the LED output component."""
if not await self._client.async_send_set_component(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_LEDDEVICE,
const.KEY_STATE: False,
}
}
):
return
def _set_internal_state(self, brightness=None, rgb_color=None, effect=None):
"""Set the internal state."""
if brightness is not None:
self._brightness = brightness
if rgb_color is not None:
self._rgb_color = rgb_color
if effect is not None:
self._effect = effect
if effect == KEY_EFFECT_SOLID:
self._icon = ICON_LIGHTBULB
elif effect in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
self._icon = ICON_EXTERNAL_SOURCE
else:
self._icon = ICON_EFFECT
def _update_components(self, _=None):
"""Update Hyperion components."""
self.async_write_ha_state()
def _update_adjustment(self, _=None):
"""Update Hyperion adjustments."""
if self._client.adjustment:
brightness_pct = self._client.adjustment[0].get(
const.KEY_BRIGHTNESS, DEFAULT_BRIGHTNESS
)
if brightness_pct < 0 or brightness_pct > 100:
return
self._set_internal_state(
brightness=int(round((brightness_pct * 255) / float(100)))
)
self.async_write_ha_state()
def _update_priorities(self, _=None):
"""Update Hyperion priorities."""
visible_priority = self._client.visible_priority
if visible_priority:
componentid = visible_priority.get(const.KEY_COMPONENTID)
if componentid in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
self._set_internal_state(rgb_color=DEFAULT_COLOR, effect=componentid)
elif componentid == const.KEY_COMPONENTID_EFFECT:
# Owner is the effect name.
# See: https://docs.hyperion-project.org/en/json/ServerInfo.html#priorities
self._set_internal_state(
rgb_color=DEFAULT_COLOR, effect=visible_priority[const.KEY_OWNER]
)
elif componentid == const.KEY_COMPONENTID_COLOR:
self._set_internal_state(
rgb_color=visible_priority[const.KEY_VALUE][const.KEY_RGB],
effect=KEY_EFFECT_SOLID,
)
self.async_write_ha_state()
def _update_effect_list(self, _=None):
"""Update Hyperion effects."""
if not self._client.effects:
return
effect_list = []
for effect in self._client.effects or []:
if const.KEY_NAME in effect:
effect_list.append(effect[const.KEY_NAME])
if effect_list:
self._effect_list = effect_list
self.async_write_ha_state()
def _update_full_state(self):
"""Update full Hyperion state."""
self._update_adjustment()
self._update_priorities()
self._update_effect_list()
_LOGGER.debug(
"Hyperion full state update: On=%s,Brightness=%i,Effect=%s "
"(%i effects total),Color=%s",
self.is_on,
self._brightness,
self._effect,
len(self._effect_list),
self._rgb_color,
)
def _update_client(self, json):
"""Update client connection state."""
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks when entity added to hass."""
self._client.set_callbacks(
{
f"{const.KEY_ADJUSTMENT}-{const.KEY_UPDATE}": self._update_adjustment,
f"{const.KEY_COMPONENTS}-{const.KEY_UPDATE}": self._update_components,
f"{const.KEY_EFFECTS}-{const.KEY_UPDATE}": self._update_effect_list,
f"{const.KEY_PRIORITIES}-{const.KEY_UPDATE}": self._update_priorities,
f"{const.KEY_CLIENT}-{const.KEY_UPDATE}": self._update_client,
}
)
# Load initial state.
self._update_full_state()
return True
|
import csv
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import activate
from django.utils.translation import gettext as _
from django.utils.translation import pgettext
from django.views.generic.list import ListView
from weblate.accounts.notifications import NOTIFICATIONS_ACTIONS
from weblate.auth.models import User
from weblate.lang.models import Language
from weblate.trans.forms import ChangesForm
from weblate.trans.models.change import Change
from weblate.utils import messages
from weblate.utils.forms import FilterForm
from weblate.utils.site import get_site_url
from weblate.utils.views import get_project_translation, show_form_errors
class ChangesView(ListView):
"""Browser for changes."""
paginate_by = 20
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.project = None
self.component = None
self.translation = None
self.language = None
self.user = None
self.actions = set()
def get_context_data(self, **kwargs):
"""Create context for rendering page."""
context = super().get_context_data(**kwargs)
context["project"] = self.project
url = {}
if self.translation is not None:
context["project"] = self.translation.component.project
context["component"] = self.translation.component
context["translation"] = self.translation
url["lang"] = self.translation.language.code
url["component"] = self.translation.component.slug
url["project"] = self.translation.component.project.slug
context["changes_rss"] = reverse("rss-translation", kwargs=url)
context["title"] = (
pgettext("Changes in translation", "Changes in %s") % self.translation
)
elif self.component is not None:
context["project"] = self.component.project
context["component"] = self.component
url["component"] = self.component.slug
url["project"] = self.component.project.slug
context["changes_rss"] = reverse("rss-component", kwargs=url)
context["title"] = (
pgettext("Changes in component", "Changes in %s") % self.component
)
elif self.project is not None:
context["project"] = self.project
url["project"] = self.project.slug
context["changes_rss"] = reverse("rss-project", kwargs=url)
context["title"] = (
pgettext("Changes in project", "Changes in %s") % self.project
)
if self.language is not None:
context["language"] = self.language
url["lang"] = self.language.code
if "changes_rss" not in context:
context["changes_rss"] = reverse("rss-language", kwargs=url)
if "title" not in context:
context["title"] = (
pgettext("Changes in language", "Changes in %s") % self.language
)
if self.user is not None:
context["changes_user"] = self.user
url["user"] = self.user.username
if "title" not in context:
context["title"] = (
pgettext("Changes by user", "Changes by %s") % self.user.full_name
)
url = list(url.items())
for action in self.actions:
url.append(("action", action))
if not url:
context["changes_rss"] = reverse("rss")
context["query_string"] = urlencode(url)
context["form"] = ChangesForm(self.request, data=self.request.GET)
context["search_items"] = url
return context
def _get_queryset_project(self, form):
"""Filtering by translation/project."""
if not form.cleaned_data.get("project"):
return
try:
self.project, self.component, self.translation = get_project_translation(
self.request,
form.cleaned_data.get("project"),
form.cleaned_data.get("component"),
form.cleaned_data.get("lang"),
)
except Http404:
messages.error(self.request, _("Failed to find matching project!"))
def _get_queryset_language(self, form):
"""Filtering by language."""
if self.translation is None and form.cleaned_data.get("lang"):
try:
self.language = Language.objects.get(code=form.cleaned_data["lang"])
except Language.DoesNotExist:
messages.error(self.request, _("Failed to find matching language!"))
def _get_queryset_user(self, form):
"""Filtering by user."""
if form.cleaned_data.get("user"):
try:
self.user = User.objects.get(username=form.cleaned_data["user"])
except User.DoesNotExist:
messages.error(self.request, _("Failed to find matching user!"))
def _get_request_actions(self):
form = ChangesForm(self.request, data=self.request.GET)
if form.is_valid() and "action" in form.cleaned_data:
self.actions.update(form.cleaned_data["action"])
def get_queryset(self):
"""Return list of changes to browse."""
form = FilterForm(self.request.GET)
if form.is_valid():
self._get_queryset_project(form)
self._get_queryset_language(form)
self._get_queryset_user(form)
self._get_request_actions()
else:
show_form_errors(self.request, form)
result = Change.objects.last_changes(self.request.user)
if self.translation is not None:
result = result.filter(translation=self.translation)
elif self.component is not None:
result = result.filter(component=self.component)
elif self.project is not None:
result = result.filter(project=self.project)
if self.language is not None:
result = result.filter(language=self.language)
if self.actions:
result = result.filter(action__in=self.actions)
if self.user is not None:
result = result.filter(user=self.user)
return result
class ChangesCSVView(ChangesView):
"""CSV renderer for changes view."""
paginate_by = None
def get(self, request, *args, **kwargs):
object_list = self.get_queryset()[:2000]
# Do reasonable ACL check for global
acl_obj = self.translation or self.component or self.project
if not acl_obj:
for change in object_list:
if change.component:
acl_obj = change.component
break
if not request.user.has_perm("change.download", acl_obj):
raise PermissionDenied()
        # Always output in English
activate("en")
response = HttpResponse(content_type="text/csv; charset=utf-8")
response["Content-Disposition"] = "attachment; filename=changes.csv"
writer = csv.writer(response)
# Add header
writer.writerow(
("timestamp", "action", "user", "url", "target", "edit_distance")
)
for change in object_list:
writer.writerow(
(
change.timestamp.isoformat(),
change.get_action_display(),
change.user.username if change.user else "",
get_site_url(change.get_absolute_url()),
change.target,
change.get_distance(),
)
)
return response
@login_required
def show_change(request, pk):
change = get_object_or_404(Change, pk=pk)
acl_obj = change.translation or change.component or change.project
if not request.user.has_perm("unit.edit", acl_obj):
raise PermissionDenied()
others = request.GET.getlist("other")
changes = None
if others:
changes = Change.objects.filter(pk__in=others + [change.pk])
for change in changes:
acl_obj = change.translation or change.component or change.project
if not request.user.has_perm("unit.edit", acl_obj):
raise PermissionDenied()
if change.action not in NOTIFICATIONS_ACTIONS:
content = ""
else:
notifications = NOTIFICATIONS_ACTIONS[change.action]
notification = notifications[0](None)
context = notification.get_context(change if not others else None)
context["request"] = request
context["changes"] = changes
context["subject"] = notification.render_template(
"_subject.txt", context, digest=bool(others)
)
content = notification.render_template(".html", context, digest=bool(others))
return HttpResponse(content_type="text/html; charset=utf-8", content=content)
|
import logging
import lupupy
from lupupy.exceptions import LupusecException
import voluptuous as vol
from homeassistant.const import CONF_IP_ADDRESS, CONF_NAME, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "lupusec"
NOTIFICATION_ID = "lupusec_notification"
NOTIFICATION_TITLE = "Lupusec Security Setup"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
LUPUSEC_PLATFORMS = ["alarm_control_panel", "binary_sensor", "switch"]
def setup(hass, config):
"""Set up Lupusec component."""
conf = config[DOMAIN]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
ip_address = conf[CONF_IP_ADDRESS]
name = conf.get(CONF_NAME)
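    # Connecting to the panel can fail; surface the error as a persistent
    # notification and abort the setup.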
try:
hass.data[DOMAIN] = LupusecSystem(username, password, ip_address, name)
except LupusecException as ex:
_LOGGER.error(ex)
hass.components.persistent_notification.create(
f"Error: {ex}<br />You will need to restart hass after fixing.",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
for platform in LUPUSEC_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
class LupusecSystem:
"""Lupusec System class."""
def __init__(self, username, password, ip_address, name):
"""Initialize the system."""
self.lupusec = lupupy.Lupusec(username, password, ip_address)
self.name = name
class LupusecDevice(Entity):
"""Representation of a Lupusec device."""
def __init__(self, data, device):
"""Initialize a sensor for Lupusec device."""
self._data = data
self._device = device
def update(self):
"""Update automation state."""
self._device.refresh()
@property
def name(self):
"""Return the name of the sensor."""
return self._device.name
|
from sqlalchemy.orm.exc import NoResultFound
from marshmallow import fields, post_load, pre_load, post_dump
from marshmallow.exceptions import ValidationError
from lemur.common import validators
from lemur.common.schema import LemurSchema, LemurInputSchema, LemurOutputSchema
from lemur.common.fields import (
KeyUsageExtension,
ExtendedKeyUsageExtension,
BasicConstraintsExtension,
SubjectAlternativeNameExtension,
)
from lemur.plugins import plugins
from lemur.plugins.utils import get_plugin_option
from lemur.roles.models import Role
from lemur.users.models import User
from lemur.authorities.models import Authority
from lemur.dns_providers.models import DnsProvider
from lemur.policies.models import RotationPolicy
from lemur.certificates.models import Certificate
from lemur.destinations.models import Destination
from lemur.notifications.models import Notification
def validate_options(options):
"""
Ensures that the plugin options are valid.
:param options:
:return:
"""
interval = get_plugin_option("interval", options)
unit = get_plugin_option("unit", options)
if not interval and not unit:
return
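    # Normalize the interval to days so the 90-day limit below applies uniformly.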
if unit == "month":
interval *= 30
elif unit == "week":
interval *= 7
if interval > 90:
raise ValidationError(
"Notification cannot be more than 90 days into the future."
)
def get_object_attribute(data, many=False):
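    # Decide whether the associated object(s) are referenced by id or by name.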
if many:
ids = [d.get("id") for d in data]
names = [d.get("name") for d in data]
if None in ids:
if None in names:
                raise ValidationError("Associated objects require a name or id.")

else:
return "name"
return "id"
else:
if data.get("id"):
return "id"
elif data.get("name"):
return "name"
else:
            raise ValidationError("Associated objects require a name or id.")
def fetch_objects(model, data, many=False):
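    # Resolve references (by id or name) to model instances, raising a
    # validation error for anything that cannot be found.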
attr = get_object_attribute(data, many=many)
if many:
values = [v[attr] for v in data]
items = model.query.filter(getattr(model, attr).in_(values)).all()
found = [getattr(i, attr) for i in items]
diff = set(values).symmetric_difference(set(found))
if diff:
raise ValidationError(
"Unable to locate {model} with {attr} {diff}".format(
model=model, attr=attr, diff=",".join(list(diff))
)
)
return items
else:
try:
return model.query.filter(getattr(model, attr) == data[attr]).one()
except NoResultFound:
raise ValidationError(
"Unable to find {model} with {attr}: {data}".format(
model=model, attr=attr, data=data[attr]
)
)
class AssociatedAuthoritySchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(Authority, data, many=many)
class AssociatedDnsProviderSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(DnsProvider, data, many=many)
class AssociatedRoleSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(Role, data, many=many)
class AssociatedDestinationSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(Destination, data, many=many)
class AssociatedNotificationSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(Notification, data, many=many)
class AssociatedCertificateSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(Certificate, data, many=many)
class AssociatedUserSchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(User, data, many=many)
class AssociatedRotationPolicySchema(LemurInputSchema):
id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
return fetch_objects(RotationPolicy, data, many=many)
class PluginInputSchema(LemurInputSchema):
plugin_options = fields.List(fields.Dict(), validate=validate_options)
slug = fields.String(required=True)
title = fields.String()
description = fields.String()
@post_load
def get_object(self, data, many=False):
try:
data["plugin_object"] = plugins.get(data["slug"])
# parse any sub-plugins
for option in data.get("plugin_options", []):
if "plugin" in option.get("type", []):
sub_data, errors = PluginInputSchema().load(option["value"])
option["value"] = sub_data
return data
except Exception as e:
raise ValidationError(
"Unable to find plugin. Slug: {0} Reason: {1}".format(data["slug"], e)
)
class PluginOutputSchema(LemurOutputSchema):
id = fields.Integer()
label = fields.String()
description = fields.String()
active = fields.Boolean()
options = fields.List(fields.Dict(), dump_to="pluginOptions")
slug = fields.String()
title = fields.String()
plugins_output_schema = PluginOutputSchema(many=True)
plugin_output_schema = PluginOutputSchema
class BaseExtensionSchema(LemurSchema):
@pre_load(pass_many=True)
def preprocess(self, data, many):
return self.under(data, many=many)
@post_dump(pass_many=True)
def post_process(self, data, many):
if data:
data = self.camel(data, many=many)
return data
class AuthorityKeyIdentifierSchema(BaseExtensionSchema):
use_key_identifier = fields.Boolean()
use_authority_cert = fields.Boolean()
class CertificateInfoAccessSchema(BaseExtensionSchema):
include_aia = fields.Boolean()
@post_dump
def handle_keys(self, data):
return {"includeAIA": data["include_aia"]}
class CRLDistributionPointsSchema(BaseExtensionSchema):
include_crl_dp = fields.String()
@post_dump
def handle_keys(self, data):
return {"includeCRLDP": data["include_crl_dp"]}
class SubjectKeyIdentifierSchema(BaseExtensionSchema):
include_ski = fields.Boolean()
@post_dump
def handle_keys(self, data):
return {"includeSKI": data["include_ski"]}
class CustomOIDSchema(BaseExtensionSchema):
oid = fields.String()
encoding = fields.String(validate=validators.encoding)
value = fields.String()
is_critical = fields.Boolean()
class NamesSchema(BaseExtensionSchema):
names = SubjectAlternativeNameExtension()
class ExtensionSchema(BaseExtensionSchema):
basic_constraints = (
BasicConstraintsExtension()
) # some devices balk on default basic constraints
key_usage = KeyUsageExtension()
extended_key_usage = ExtendedKeyUsageExtension()
subject_key_identifier = fields.Nested(SubjectKeyIdentifierSchema)
sub_alt_names = fields.Nested(NamesSchema)
authority_key_identifier = fields.Nested(AuthorityKeyIdentifierSchema)
certificate_info_access = fields.Nested(CertificateInfoAccessSchema)
crl_distribution_points = fields.Nested(
CRLDistributionPointsSchema, dump_to="cRL_distribution_points"
)
# FIXME: Convert custom OIDs to a custom field in fields.py like other Extensions
# FIXME: Remove support in UI for Critical custom extensions https://github.com/Netflix/lemur/issues/665
custom = fields.List(fields.Nested(CustomOIDSchema))
class EndpointNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
description = fields.String()
name = fields.String()
dnsname = fields.String()
owner = fields.Email()
type = fields.String()
active = fields.Boolean()
|
import base64
import logging
import requests
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_TEACH_FACE
_LOGGER = logging.getLogger(__name__)
ATTR_BOUNDING_BOX = "bounding_box"
ATTR_CLASSIFIER = "classifier"
ATTR_IMAGE_ID = "image_id"
ATTR_ID = "id"
ATTR_MATCHED = "matched"
FACEBOX_NAME = "name"
CLASSIFIER = "facebox"
DATA_FACEBOX = "facebox_classifiers"
FILE_PATH = "file_path"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
}
)
SERVICE_TEACH_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
}
)
def check_box_health(url, username, password):
"""Check the health of the classifier and return its id if healthy."""
kwargs = {}
if username:
kwargs["auth"] = requests.auth.HTTPBasicAuth(username, password)
try:
response = requests.get(url, **kwargs)
if response.status_code == HTTP_UNAUTHORIZED:
_LOGGER.error("AuthenticationError on %s", CLASSIFIER)
return None
if response.status_code == HTTP_OK:
return response.json()["hostname"]
except requests.exceptions.ConnectionError:
_LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
return None
def encode_image(image):
"""base64 encode an image stream."""
base64_img = base64.b64encode(image).decode("ascii")
return base64_img
def get_matched_faces(faces):
"""Return the name and rounded confidence of matched faces."""
return {
face["name"]: round(face["confidence"], 2) for face in faces if face["matched"]
}
def parse_faces(api_faces):
"""Parse the API face data into the format required."""
known_faces = []
for entry in api_faces:
face = {}
if entry["matched"]: # This data is only in matched faces.
face[FACEBOX_NAME] = entry["name"]
face[ATTR_IMAGE_ID] = entry["id"]
        else: # Let's be explicit.
face[FACEBOX_NAME] = None
face[ATTR_IMAGE_ID] = None
face[ATTR_CONFIDENCE] = round(100.0 * entry["confidence"], 2)
face[ATTR_MATCHED] = entry["matched"]
face[ATTR_BOUNDING_BOX] = entry["rect"]
known_faces.append(face)
return known_faces
def post_image(url, image, username, password):
"""Post an image to the classifier."""
kwargs = {}
if username:
kwargs["auth"] = requests.auth.HTTPBasicAuth(username, password)
try:
response = requests.post(url, json={"base64": encode_image(image)}, **kwargs)
if response.status_code == HTTP_UNAUTHORIZED:
_LOGGER.error("AuthenticationError on %s", CLASSIFIER)
return None
return response
except requests.exceptions.ConnectionError:
_LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
return None
def teach_file(url, name, file_path, username, password):
"""Teach the classifier a name associated with a file."""
kwargs = {}
if username:
kwargs["auth"] = requests.auth.HTTPBasicAuth(username, password)
try:
with open(file_path, "rb") as open_file:
response = requests.post(
url,
data={FACEBOX_NAME: name, ATTR_ID: file_path},
files={"file": open_file},
**kwargs,
)
if response.status_code == HTTP_UNAUTHORIZED:
_LOGGER.error("AuthenticationError on %s", CLASSIFIER)
elif response.status_code == HTTP_BAD_REQUEST:
_LOGGER.error(
"%s teaching of file %s failed with message:%s",
CLASSIFIER,
file_path,
response.text,
)
except requests.exceptions.ConnectionError:
_LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
def valid_file_path(file_path):
"""Check that a file_path points to a valid file."""
try:
cv.isfile(file_path)
return True
except vol.Invalid:
_LOGGER.error("%s error: Invalid file path: %s", CLASSIFIER, file_path)
return False
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the classifier."""
if DATA_FACEBOX not in hass.data:
hass.data[DATA_FACEBOX] = []
ip_address = config[CONF_IP_ADDRESS]
port = config[CONF_PORT]
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
url_health = f"http://{ip_address}:{port}/healthz"
hostname = check_box_health(url_health, username, password)
if hostname is None:
return
entities = []
for camera in config[CONF_SOURCE]:
facebox = FaceClassifyEntity(
ip_address,
port,
username,
password,
hostname,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
)
entities.append(facebox)
hass.data[DATA_FACEBOX].append(facebox)
add_entities(entities)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get("entity_id")
classifiers = hass.data[DATA_FACEBOX]
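        # If specific entity_ids were passed to the service, only teach those classifiers.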
if entity_ids:
classifiers = [c for c in classifiers if c.entity_id in entity_ids]
for classifier in classifiers:
name = service.data.get(ATTR_NAME)
file_path = service.data.get(FILE_PATH)
classifier.teach(name, file_path)
hass.services.register(
DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
)
class FaceClassifyEntity(ImageProcessingFaceEntity):
"""Perform a face classification."""
def __init__(
self, ip_address, port, username, password, hostname, camera_entity, name=None
):
"""Init with the API key and model id."""
super().__init__()
self._url_check = f"http://{ip_address}:{port}/{CLASSIFIER}/check"
self._url_teach = f"http://{ip_address}:{port}/{CLASSIFIER}/teach"
self._username = username
self._password = password
self._hostname = hostname
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = f"{CLASSIFIER} {camera_name}"
self._matched = {}
def process_image(self, image):
"""Process an image."""
response = post_image(self._url_check, image, self._username, self._password)
if response:
response_json = response.json()
if response_json["success"]:
total_faces = response_json["facesCount"]
faces = parse_faces(response_json["faces"])
self._matched = get_matched_faces(faces)
self.process_faces(faces, total_faces)
else:
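            # The classifier could not be reached; clear any previously reported faces.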
self.total_faces = None
self.faces = []
self._matched = {}
def teach(self, name, file_path):
"""Teach classifier a face name."""
if not self.hass.config.is_allowed_path(file_path) or not valid_file_path(
file_path
):
return
teach_file(self._url_teach, name, file_path, self._username, self._password)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the classifier attributes."""
return {
"matched_faces": self._matched,
"total_matched_faces": len(self._matched),
"hostname": self._hostname,
}
|
from pyinsteon.constants import FanSpeed
from homeassistant.components.fan import (
DOMAIN as FAN_DOMAIN,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import SIGNAL_ADD_ENTITIES
from .insteon_entity import InsteonEntity
from .utils import async_add_insteon_entities
FAN_SPEEDS = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SPEED_TO_VALUE = {
SPEED_OFF: FanSpeed.OFF,
SPEED_LOW: FanSpeed.LOW,
SPEED_MEDIUM: FanSpeed.MEDIUM,
SPEED_HIGH: FanSpeed.HIGH,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Insteon fans from a config entry."""
def add_entities(discovery_info=None):
"""Add the Insteon entities for the platform."""
async_add_insteon_entities(
hass, FAN_DOMAIN, InsteonFanEntity, async_add_entities, discovery_info
)
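    # Listen for newly discovered Insteon fan devices, then add the ones already known.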
signal = f"{SIGNAL_ADD_ENTITIES}_{FAN_DOMAIN}"
async_dispatcher_connect(hass, signal, add_entities)
add_entities()
class InsteonFanEntity(InsteonEntity, FanEntity):
"""An INSTEON fan entity."""
@property
def speed(self) -> str:
"""Return the current speed."""
if self._insteon_device_group.value == FanSpeed.HIGH:
return SPEED_HIGH
if self._insteon_device_group.value == FanSpeed.MEDIUM:
return SPEED_MEDIUM
if self._insteon_device_group.value == FanSpeed.LOW:
return SPEED_LOW
return SPEED_OFF
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return FAN_SPEEDS
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
if speed is None:
speed = SPEED_MEDIUM
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
await self._insteon_device.async_fan_off()
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
fan_speed = SPEED_TO_VALUE[speed]
if fan_speed == FanSpeed.OFF:
await self._insteon_device.async_fan_off()
else:
await self._insteon_device.async_fan_on(on_level=fan_speed)
|
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
import dill
import numpy as np
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_categorical_ensemble_basic_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'pclass': 'categorical'
, 'embarked': 'categorical'
, 'sex': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_titanic_train, categorical_column='pclass', optimize_final_model=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Small sample sizes mean there's a fair bit of noise here
assert -0.155 < test_score < -0.135
def test_categorical_ensembling_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_boston_train, perform_feature_selection=True, model_names=model_name, categorical_column='CHAS')
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
lower_bound = -4.2
assert lower_bound < test_score < -2.8
|
import pytest
from kombu import version_info_t
from kombu.utils.text import version_string_as_tuple
def test_dir():
import kombu
assert dir(kombu)
@pytest.mark.parametrize('version,expected', [
('3', version_info_t(3, 0, 0, '', '')),
('3.3', version_info_t(3, 3, 0, '', '')),
('3.3.1', version_info_t(3, 3, 1, '', '')),
('3.3.1a3', version_info_t(3, 3, 1, 'a3', '')),
('3.3.1.a3.40c32', version_info_t(3, 3, 1, 'a3', '40c32')),
])
def test_version_string_as_tuple(version, expected):
assert version_string_as_tuple(version) == expected
|
import asyncio
from json import JSONEncoder
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Type, Union
from homeassistant.const import EVENT_HOMEASSISTANT_FINAL_WRITE
from homeassistant.core import CALLBACK_TYPE, CoreState, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.loader import bind_hass
from homeassistant.util import json as json_util
# mypy: allow-untyped-calls, allow-untyped-defs, no-warn-return-any
# mypy: no-check-untyped-defs
STORAGE_DIR = ".storage"
_LOGGER = logging.getLogger(__name__)
@bind_hass
async def async_migrator(
hass,
old_path,
store,
*,
old_conf_load_func=None,
old_conf_migrate_func=None,
):
"""Migrate old data to a store and then load data.
async def old_conf_migrate_func(old_data)
"""
store_data = await store.async_load()
# If we already have store data we have already migrated in the past.
if store_data is not None:
return store_data
def load_old_config():
"""Load old config."""
if not os.path.isfile(old_path):
return None
if old_conf_load_func is not None:
return old_conf_load_func(old_path)
return json_util.load_json(old_path)
config = await hass.async_add_executor_job(load_old_config)
if config is None:
return None
if old_conf_migrate_func is not None:
config = await old_conf_migrate_func(config)
await store.async_save(config)
await hass.async_add_executor_job(os.remove, old_path)
return config
@bind_hass
class Store:
"""Class to help storing data."""
def __init__(
self,
hass: HomeAssistant,
version: int,
key: str,
private: bool = False,
*,
encoder: Optional[Type[JSONEncoder]] = None,
):
"""Initialize storage class."""
self.version = version
self.key = key
self.hass = hass
self._private = private
self._data: Optional[Dict[str, Any]] = None
self._unsub_delay_listener: Optional[CALLBACK_TYPE] = None
self._unsub_final_write_listener: Optional[CALLBACK_TYPE] = None
self._write_lock = asyncio.Lock()
self._load_task: Optional[asyncio.Future] = None
self._encoder = encoder
@property
def path(self):
"""Return the config path."""
return self.hass.config.path(STORAGE_DIR, self.key)
async def async_load(self) -> Union[Dict, List, None]:
"""Load data.
If the expected version does not match the given version, the migrate
function will be invoked with await migrate_func(version, config).
Will ensure that when a call comes in while another one is in progress,
the second call will wait and return the result of the first call.
"""
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
async def _async_load(self):
"""Load the data."""
# Check if we have a pending write
if self._data is not None:
data = self._data
# If we didn't generate data yet, do it now.
if "data_func" in data:
data["data"] = data.pop("data_func")()
else:
data = await self.hass.async_add_executor_job(
json_util.load_json, self.path
)
if data == {}:
return None
if data["version"] == self.version:
stored = data["data"]
else:
_LOGGER.info(
"Migrating %s storage from %s to %s",
self.key,
data["version"],
self.version,
)
stored = await self._async_migrate_func(data["version"], data["data"])
self._load_task = None
return stored
async def async_save(self, data: Union[Dict, List]) -> None:
"""Save data."""
self._data = {"version": self.version, "key": self.key, "data": data}
if self.hass.state == CoreState.stopping:
self._async_ensure_final_write_listener()
return
await self._async_handle_write_data()
@callback
def async_delay_save(self, data_func: Callable[[], Dict], delay: float = 0) -> None:
"""Save data with an optional delay."""
self._data = {"version": self.version, "key": self.key, "data_func": data_func}
self._async_cleanup_delay_listener()
self._async_ensure_final_write_listener()
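        # The final-write listener registered above guarantees the data is flushed
        # even if Home Assistant stops before the delay expires.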
if self.hass.state == CoreState.stopping:
return
self._unsub_delay_listener = async_call_later(
self.hass, delay, self._async_callback_delayed_write
)
@callback
def _async_ensure_final_write_listener(self):
"""Ensure that we write if we quit before delay has passed."""
if self._unsub_final_write_listener is None:
self._unsub_final_write_listener = self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_FINAL_WRITE, self._async_callback_final_write
)
@callback
def _async_cleanup_final_write_listener(self):
"""Clean up a stop listener."""
if self._unsub_final_write_listener is not None:
self._unsub_final_write_listener()
self._unsub_final_write_listener = None
@callback
def _async_cleanup_delay_listener(self):
"""Clean up a delay listener."""
if self._unsub_delay_listener is not None:
self._unsub_delay_listener()
self._unsub_delay_listener = None
async def _async_callback_delayed_write(self, _now):
"""Handle a delayed write callback."""
# catch the case where a call is scheduled and then we stop Home Assistant
if self.hass.state == CoreState.stopping:
self._async_ensure_final_write_listener()
return
await self._async_handle_write_data()
async def _async_callback_final_write(self, _event):
"""Handle a write because Home Assistant is in final write state."""
self._unsub_final_write_listener = None
await self._async_handle_write_data()
async def _async_handle_write_data(self, *_args):
"""Handle writing the config."""
async with self._write_lock:
self._async_cleanup_delay_listener()
self._async_cleanup_final_write_listener()
if self._data is None:
# Another write already consumed the data
return
data = self._data
if "data_func" in data:
data["data"] = data.pop("data_func")()
self._data = None
try:
await self.hass.async_add_executor_job(
self._write_data, self.path, data
)
except (json_util.SerializationError, json_util.WriteError) as err:
_LOGGER.error("Error writing config for %s: %s", self.key, err)
def _write_data(self, path: str, data: Dict) -> None:
"""Write the data."""
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
_LOGGER.debug("Writing data for %s to %s", self.key, path)
json_util.save_json(path, data, self._private, encoder=self._encoder)
async def _async_migrate_func(self, old_version, old_data):
"""Migrate to the new version."""
raise NotImplementedError
async def async_remove(self):
"""Remove all data."""
self._async_cleanup_delay_listener()
self._async_cleanup_final_write_listener()
try:
await self.hass.async_add_executor_job(os.unlink, self.path)
except FileNotFoundError:
pass
|
import asyncio
import unittest
import pytest
import voluptuous as vol
import yaml
from homeassistant import config
import homeassistant.components as comps
from homeassistant.components.homeassistant import (
SERVICE_CHECK_CONFIG,
SERVICE_RELOAD_CORE_CONFIG,
SERVICE_SET_LOCATION,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.helpers import entity
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import (
async_capture_events,
async_mock_service,
get_test_home_assistant,
mock_service,
patch_yaml_files,
)
def turn_on(hass, entity_id=None, **service_data):
"""Turn specified entity on if possible.
This is a legacy helper method. Do not use it for new tests.
"""
if entity_id is not None:
service_data[ATTR_ENTITY_ID] = entity_id
hass.services.call(ha.DOMAIN, SERVICE_TURN_ON, service_data)
def turn_off(hass, entity_id=None, **service_data):
"""Turn specified entity off.
This is a legacy helper method. Do not use it for new tests.
"""
if entity_id is not None:
service_data[ATTR_ENTITY_ID] = entity_id
hass.services.call(ha.DOMAIN, SERVICE_TURN_OFF, service_data)
def toggle(hass, entity_id=None, **service_data):
"""Toggle specified entity.
This is a legacy helper method. Do not use it for new tests.
"""
if entity_id is not None:
service_data[ATTR_ENTITY_ID] = entity_id
hass.services.call(ha.DOMAIN, SERVICE_TOGGLE, service_data)
def stop(hass):
"""Stop Home Assistant.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(ha.DOMAIN, SERVICE_HOMEASSISTANT_STOP)
def restart(hass):
"""Stop Home Assistant.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(ha.DOMAIN, SERVICE_HOMEASSISTANT_RESTART)
def check_config(hass):
"""Check the config files.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(ha.DOMAIN, SERVICE_CHECK_CONFIG)
def reload_core_config(hass):
"""Reload the core config.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG)
class TestComponentsCore(unittest.TestCase):
"""Test homeassistant.components module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
assert asyncio.run_coroutine_threadsafe(
async_setup_component(self.hass, "homeassistant", {}), self.hass.loop
).result()
self.hass.states.set("light.Bowl", STATE_ON)
self.hass.states.set("light.Ceiling", STATE_OFF)
self.addCleanup(self.hass.stop)
def test_is_on(self):
"""Test is_on method."""
assert comps.is_on(self.hass, "light.Bowl")
assert not comps.is_on(self.hass, "light.Ceiling")
assert comps.is_on(self.hass)
assert not comps.is_on(self.hass, "non_existing.entity")
def test_turn_on_without_entities(self):
"""Test turn_on method without entities."""
calls = mock_service(self.hass, "light", SERVICE_TURN_ON)
turn_on(self.hass)
self.hass.block_till_done()
assert 0 == len(calls)
def test_turn_on(self):
"""Test turn_on method."""
calls = mock_service(self.hass, "light", SERVICE_TURN_ON)
turn_on(self.hass, "light.Ceiling")
self.hass.block_till_done()
assert 1 == len(calls)
def test_turn_off(self):
"""Test turn_off method."""
calls = mock_service(self.hass, "light", SERVICE_TURN_OFF)
turn_off(self.hass, "light.Bowl")
self.hass.block_till_done()
assert 1 == len(calls)
def test_toggle(self):
"""Test toggle method."""
calls = mock_service(self.hass, "light", SERVICE_TOGGLE)
toggle(self.hass, "light.Bowl")
self.hass.block_till_done()
assert 1 == len(calls)
@patch("homeassistant.config.os.path.isfile", Mock(return_value=True))
def test_reload_core_conf(self):
"""Test reload core conf service."""
ent = entity.Entity()
ent.entity_id = "test.entity"
ent.hass = self.hass
ent.schedule_update_ha_state()
self.hass.block_till_done()
state = self.hass.states.get("test.entity")
assert state is not None
assert state.state == "unknown"
assert state.attributes == {}
files = {
config.YAML_CONFIG_FILE: yaml.dump(
{
ha.DOMAIN: {
"latitude": 10,
"longitude": 20,
"customize": {"test.Entity": {"hello": "world"}},
}
}
)
}
with patch_yaml_files(files, True):
reload_core_config(self.hass)
self.hass.block_till_done()
assert self.hass.config.latitude == 10
assert self.hass.config.longitude == 20
ent.schedule_update_ha_state()
self.hass.block_till_done()
state = self.hass.states.get("test.entity")
assert state is not None
assert state.state == "unknown"
assert state.attributes.get("hello") == "world"
@patch("homeassistant.config.os.path.isfile", Mock(return_value=True))
@patch("homeassistant.components.homeassistant._LOGGER.error")
@patch("homeassistant.config.async_process_ha_core_config")
def test_reload_core_with_wrong_conf(self, mock_process, mock_error):
"""Test reload core conf service."""
files = {config.YAML_CONFIG_FILE: yaml.dump(["invalid", "config"])}
with patch_yaml_files(files, True):
reload_core_config(self.hass)
self.hass.block_till_done()
assert mock_error.called
assert mock_process.called is False
@patch("homeassistant.core.HomeAssistant.async_stop", return_value=None)
def test_stop_homeassistant(self, mock_stop):
"""Test stop service."""
stop(self.hass)
self.hass.block_till_done()
assert mock_stop.called
@patch("homeassistant.core.HomeAssistant.async_stop", return_value=None)
@patch("homeassistant.config.async_check_ha_config_file", return_value=None)
def test_restart_homeassistant(self, mock_check, mock_restart):
"""Test stop service."""
restart(self.hass)
self.hass.block_till_done()
assert mock_restart.called
assert mock_check.called
@patch("homeassistant.core.HomeAssistant.async_stop", return_value=None)
@patch(
"homeassistant.config.async_check_ha_config_file",
side_effect=HomeAssistantError("Test error"),
)
def test_restart_homeassistant_wrong_conf(self, mock_check, mock_restart):
"""Test stop service."""
restart(self.hass)
self.hass.block_till_done()
assert mock_check.called
assert not mock_restart.called
@patch("homeassistant.core.HomeAssistant.async_stop", return_value=None)
@patch("homeassistant.config.async_check_ha_config_file", return_value=None)
def test_check_config(self, mock_check, mock_stop):
"""Test stop service."""
check_config(self.hass)
self.hass.block_till_done()
assert mock_check.called
assert not mock_stop.called
async def test_turn_on_to_not_block_for_domains_without_service(hass):
"""Test if turn_on is blocking domain with no service."""
await async_setup_component(hass, "homeassistant", {})
async_mock_service(hass, "light", SERVICE_TURN_ON)
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
    # We can't assert on the resulting service calls directly, because
    # mocking out ServiceRegistry.async_call mocks out every service call.
    # Instead we mimic how the service registry invokes the handler.
service_call = ha.ServiceCall(
"homeassistant",
"turn_on",
{"entity_id": ["light.test", "sensor.bla", "light.bla"]},
)
service = hass.services._services["homeassistant"]["turn_on"]
with patch(
"homeassistant.core.ServiceRegistry.async_call",
return_value=None,
) as mock_call:
await service.job.target(service_call)
assert mock_call.call_count == 2
assert mock_call.call_args_list[0][0] == (
"light",
"turn_on",
{"entity_id": ["light.bla", "light.test"]},
True,
)
assert mock_call.call_args_list[1][0] == (
"sensor",
"turn_on",
{"entity_id": ["sensor.bla"]},
False,
)
async def test_entity_update(hass):
"""Test being able to call entity update."""
await async_setup_component(hass, "homeassistant", {})
with patch(
"homeassistant.helpers.entity_component.async_update_entity",
return_value=None,
) as mock_update:
await hass.services.async_call(
"homeassistant",
"update_entity",
{"entity_id": ["light.kitchen"]},
blocking=True,
)
assert len(mock_update.mock_calls) == 1
assert mock_update.mock_calls[0][1][1] == "light.kitchen"
async def test_setting_location(hass):
"""Test setting the location."""
await async_setup_component(hass, "homeassistant", {})
events = async_capture_events(hass, EVENT_CORE_CONFIG_UPDATE)
# Just to make sure that we are updating values.
assert hass.config.latitude != 30
assert hass.config.longitude != 40
await hass.services.async_call(
"homeassistant",
"set_location",
{"latitude": 30, "longitude": 40},
blocking=True,
)
assert len(events) == 1
assert hass.config.latitude == 30
assert hass.config.longitude == 40
async def test_require_admin(hass, hass_read_only_user):
"""Test services requiring admin."""
await async_setup_component(hass, "homeassistant", {})
for service in (
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
SERVICE_CHECK_CONFIG,
SERVICE_RELOAD_CORE_CONFIG,
):
with pytest.raises(Unauthorized):
await hass.services.async_call(
ha.DOMAIN,
service,
{},
context=ha.Context(user_id=hass_read_only_user.id),
blocking=True,
)
assert False, f"Should have raises for {service}"
with pytest.raises(Unauthorized):
await hass.services.async_call(
ha.DOMAIN,
SERVICE_SET_LOCATION,
{"latitude": 0, "longitude": 0},
context=ha.Context(user_id=hass_read_only_user.id),
blocking=True,
)
async def test_turn_on_off_toggle_schema(hass, hass_read_only_user):
"""Test the schemas for the turn on/off/toggle services."""
await async_setup_component(hass, "homeassistant", {})
for service in SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE:
for invalid in None, "nothing", ENTITY_MATCH_ALL, ENTITY_MATCH_NONE:
with pytest.raises(vol.Invalid):
await hass.services.async_call(
ha.DOMAIN,
service,
{"entity_id": invalid},
context=ha.Context(user_id=hass_read_only_user.id),
blocking=True,
)
async def test_not_allowing_recursion(hass, caplog):
"""Test we do not allow recursion."""
await async_setup_component(hass, "homeassistant", {})
for service in SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE:
await hass.services.async_call(
ha.DOMAIN,
service,
{"entity_id": "homeassistant.light"},
blocking=True,
)
assert (
f"Called service homeassistant.{service} with invalid entity IDs homeassistant.light"
in caplog.text
), service
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links import PixelwiseSoftmaxClassifier
class DummySemanticSegmentationModel(chainer.Chain):
def __init__(self, n_class):
super(DummySemanticSegmentationModel, self).__init__()
self.n_class = n_class
def forward(self, x):
n, _, h, w = x.shape
y = self.xp.random.rand(n, self.n_class, h, w).astype(np.float32)
return chainer.Variable(y)
@testing.parameterize(
{'n_class': 11, 'ignore_label': -1, 'class_weight': True},
{'n_class': 11, 'ignore_label': 11, 'class_weight': None},
)
class TestPixelwiseSoftmaxClassifier(unittest.TestCase):
def setUp(self):
model = DummySemanticSegmentationModel(self.n_class)
if self.class_weight:
self.class_weight = [0.1 * i for i in range(self.n_class)]
self.link = PixelwiseSoftmaxClassifier(
model, self.ignore_label, self.class_weight)
self.x = np.random.rand(2, 3, 16, 16).astype(np.float32)
self.t = np.random.randint(
self.n_class, size=(2, 16, 16)).astype(np.int32)
def _check_call(self):
xp = self.link.xp
loss = self.link(chainer.Variable(xp.asarray(self.x)),
chainer.Variable(xp.asarray(self.t)))
self.assertIsInstance(loss, chainer.Variable)
self.assertIsInstance(loss.array, self.link.xp.ndarray)
self.assertEqual(loss.shape, ())
self.assertTrue(hasattr(self.link, 'y'))
self.assertIsNotNone(self.link.y)
self.assertTrue(hasattr(self.link, 'loss'))
xp.testing.assert_allclose(self.link.loss.array, loss.array)
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
testing.run_module(__name__, __file__)
|
import mock
import pytest
from kubernetes.client.rest import ApiException
from paasta_tools.kubernetes.bin.paasta_secrets_sync import main
from paasta_tools.kubernetes.bin.paasta_secrets_sync import parse_args
from paasta_tools.kubernetes.bin.paasta_secrets_sync import sync_all_secrets
from paasta_tools.kubernetes.bin.paasta_secrets_sync import sync_secrets
def test_parse_args():
with mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.argparse.ArgumentParser",
autospec=True,
):
assert parse_args()
def test_main():
with mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.parse_args", autospec=True
), mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.load_system_paasta_config",
autospec=True,
), mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.KubeClient", autospec=True
), mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.sync_all_secrets",
autospec=True,
) as mock_sync_all_secrets:
mock_sync_all_secrets.return_value = True
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
mock_sync_all_secrets.return_value = False
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 1
def test_sync_all_secrets():
with mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.sync_secrets", autospec=True
) as mock_sync_secrets:
mock_sync_secrets.side_effect = [True, True]
assert sync_all_secrets(
kube_client=mock.Mock(),
cluster="westeros-prod",
service_list=["foo", "bar"],
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
mock_sync_secrets.side_effect = [True, False]
assert not sync_all_secrets(
kube_client=mock.Mock(),
cluster="westeros-prod",
service_list=["foo", "bar"],
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
mock_sync_secrets.side_effect = None
assert sync_all_secrets(
kube_client=mock.Mock(),
cluster="westeros-prod",
service_list=["foo", "bar"],
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
def test_sync_secrets():
with mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.get_secret_provider",
autospec=True,
) as mock_get_secret_provider, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.os.scandir", autospec=True
) as mock_scandir, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.open",
create=True,
autospec=False,
), mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.get_kubernetes_secret_signature",
autospec=True,
) as mock_get_kubernetes_secret_signature, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.create_secret", autospec=True
) as mock_create_secret, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.create_kubernetes_secret_signature",
autospec=True,
) as mock_create_kubernetes_secret_signature, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.update_secret", autospec=True
) as mock_update_secret, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.update_kubernetes_secret_signature",
autospec=True,
) as mock_update_kubernetes_secret_signature, mock.patch(
"paasta_tools.kubernetes.bin.paasta_secrets_sync.json.load", autospec=True
), mock.patch(
"os.path.isdir", autospec=True, return_value=True
):
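        # First pass: an empty secrets directory should still count as a successful sync.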
mock_scandir.return_value.__enter__.return_value = []
mock_client = mock.Mock()
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
mock_scandir.return_value.__enter__.return_value = [mock.Mock(path="some_file")]
mock_client = mock.Mock()
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
mock_get_secret_provider.return_value = mock.Mock(
get_secret_signature_from_data=mock.Mock(return_value="123abc")
)
mock_scandir.return_value.__enter__.return_value = [
mock.Mock(path="some_file.json")
]
mock_client = mock.Mock()
mock_get_kubernetes_secret_signature.return_value = "123abc"
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
assert mock_get_kubernetes_secret_signature.called
assert not mock_create_secret.called
assert not mock_update_secret.called
assert not mock_create_kubernetes_secret_signature.called
assert not mock_update_kubernetes_secret_signature.called
mock_get_kubernetes_secret_signature.return_value = "123def"
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
assert mock_get_kubernetes_secret_signature.called
assert not mock_create_secret.called
assert mock_update_secret.called
assert not mock_create_kubernetes_secret_signature.called
assert mock_update_kubernetes_secret_signature.called
mock_update_kubernetes_secret_signature.reset_mock()
mock_update_secret.reset_mock()
mock_get_kubernetes_secret_signature.return_value = None
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
assert mock_get_kubernetes_secret_signature.called
assert mock_create_secret.called
assert not mock_update_secret.called
assert mock_create_kubernetes_secret_signature.called
assert not mock_update_kubernetes_secret_signature.called
mock_update_kubernetes_secret_signature.reset_mock()
mock_update_secret.reset_mock()
mock_create_secret.side_effect = ApiException(409)
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
assert mock_get_kubernetes_secret_signature.called
assert mock_create_secret.called
assert not mock_update_secret.called
assert mock_create_kubernetes_secret_signature.called
assert not mock_update_kubernetes_secret_signature.called
mock_create_secret.side_effect = ApiException(404)
with pytest.raises(ApiException):
assert sync_secrets(
kube_client=mock_client,
cluster="westeros-prod",
service="universe",
secret_provider_name="vaulty",
vault_cluster_config={},
soa_dir="/nail/blah",
)
|
import sys
import datetime
from Handler import Handler
from configobj import Section
try:
import boto
import boto.ec2.cloudwatch
import boto.utils
except ImportError:
boto = None
class cloudwatchHandler(Handler):
"""
    Implements the abstract Handler class,
    sending data to AWS CloudWatch
"""
def __init__(self, config=None):
"""
Create a new instance of cloudwatchHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if not boto:
self.log.error(
"CloudWatch: Boto is not installed, please install boto.")
return
# Initialize Data
self.connection = None
# Initialize Options
self.region = self.config['region']
instance_metadata = boto.utils.get_instance_metadata()
if 'instance-id' in instance_metadata:
self.instance_id = instance_metadata['instance-id']
self.log.debug("Setting InstanceId: " + self.instance_id)
else:
self.instance_id = None
self.log.error('CloudWatch: Failed to load instance metadata')
self.valid_config = ('region', 'collector', 'metric', 'namespace',
'name', 'unit', 'collect_by_instance',
'collect_without_dimension')
self.rules = []
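        # Each config sub-section defines one publishing rule; unknown keys are
        # logged as warnings and ignored.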
for key_name, section in self.config.items():
if section.__class__ is Section:
keys = section.keys()
rules = self.get_default_rule_config()
for key in keys:
if key not in self.valid_config:
self.log.warning("invalid key %s in section %s",
key, section.name)
else:
rules[key] = section[key]
self.rules.append(rules)
# Create CloudWatch Connection
self._bind()
def get_default_rule_config(self):
"""
Return the default config for a rule
"""
config = {}
config.update({
'collector': '',
'metric': '',
'namespace': '',
'name': '',
'unit': 'None',
'collect_by_instance': True,
'collect_without_dimension': False
})
return config
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(cloudwatchHandler, self).get_default_config_help()
config.update({
'region': 'AWS region',
'metric': 'Diamond metric name',
'namespace': 'CloudWatch metric namespace',
'name': 'CloudWatch metric name',
'unit': 'CloudWatch metric unit',
'collector': 'Diamond collector name',
'collect_by_instance': 'Collect metrics for instances separately',
'collect_without_dimension': 'Collect metrics without dimension'
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(cloudwatchHandler, self).get_default_config()
config.update({
'region': 'us-east-1',
'collector': 'loadavg',
'metric': '01',
'namespace': 'MachineLoad',
'name': 'Avg01',
'unit': 'None',
'collect_by_instance': True,
'collect_without_dimension': False
})
return config
def _bind(self):
"""
Create CloudWatch Connection
"""
self.log.debug(
"CloudWatch: Attempting to connect to CloudWatch at Region: %s",
self.region)
try:
self.connection = boto.ec2.cloudwatch.connect_to_region(
self.region)
self.log.debug(
"CloudWatch: Succesfully Connected to CloudWatch at Region: %s",
self.region)
except boto.exception.EC2ResponseError:
self.log.error('CloudWatch: CloudWatch Exception Handler: ')
def __del__(self):
"""
Destroy instance of the cloudWatchHandler class
"""
try:
self.connection = None
except AttributeError:
pass
def process(self, metric):
"""
Process a metric and send it to CloudWatch
"""
if not boto:
return
collector = str(metric.getCollectorPath())
metricname = str(metric.getMetricPath())
        # Match the metric against each configured rule and publish it to CloudWatch.
for rule in self.rules:
self.log.debug(
"Comparing Collector: [%s] with (%s) "
"and Metric: [%s] with (%s)",
str(rule['collector']),
collector,
str(rule['metric']),
metricname
)
if ((str(rule['collector']) == collector and
str(rule['metric']) == metricname)):
if rule['collect_by_instance'] and self.instance_id:
self.send_metrics_to_cloudwatch(
rule,
metric,
{'InstanceId': self.instance_id})
if rule['collect_without_dimension']:
self.send_metrics_to_cloudwatch(
rule,
metric,
{})
def send_metrics_to_cloudwatch(self, rule, metric, dimensions):
"""
Send metrics to CloudWatch for the given dimensions
"""
timestamp = datetime.datetime.utcfromtimestamp(metric.timestamp)
self.log.debug(
"CloudWatch: Attempting to publish metric: %s to %s "
"with value (%s) for dimensions %s @%s",
rule['name'],
rule['namespace'],
str(metric.value),
str(dimensions),
str(metric.timestamp)
)
try:
self.connection.put_metric_data(
str(rule['namespace']),
str(rule['name']),
str(metric.value),
timestamp, str(rule['unit']),
dimensions)
self.log.debug(
"CloudWatch: Successfully published metric: %s to"
" %s with value (%s) for dimensions %s",
rule['name'],
rule['namespace'],
str(metric.value),
str(dimensions))
except AttributeError as e:
self.log.error(
"CloudWatch: Failed publishing - %s ", str(e))
except Exception as e: # Rough connection re-try logic.
self.log.error(
"CloudWatch: Failed publishing - %s\n%s ",
str(e),
str(sys.exc_info()[0]))
self._bind()
|
import argparse
import glob
import os
import struct
import sys
def clamp_to_min_max(value, min, max):
if value > max:
value = max
elif value < min:
value = min
return value
def clamp_to_u8(value):
return clamp_to_min_max(value, 0, 255)
def parse_args():
parser = argparse.ArgumentParser(description="Set the mouse idle time")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
parser.add_argument('--timeout', required=True, type=int, help="Seconds (60-900)")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
    seconds = clamp_to_min_max(args.timeout, 60, 900)
byte_string = bytes(str(seconds), 'utf-8') # Convert string to bytestring
mouse_idle_time_filepath = os.path.join(mouse_dir, "set_idle_time")
with open(mouse_idle_time_filepath, 'wb') as mouse_idle_time_file:
mouse_idle_time_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run()
|
import six
from six.moves import zip
from pyVmomi import VmomiSupport, types
import logging
from pyVmomi.VmomiSupport import GetWsdlName, Type
__Log__ = logging.getLogger('ObjDiffer')
def LogIf(condition, message):
"""Log a message if the condition is met"""
if condition:
__Log__.debug(message)
def IsPrimitiveType(obj):
"""See if the passed in type is a Primitive Type"""
return (isinstance(obj, types.bool) or isinstance(obj, types.byte) or
isinstance(obj, types.short) or isinstance(obj, six.integer_types) or
isinstance(obj, types.double) or isinstance(obj, types.float) or
isinstance(obj, six.string_types) or
isinstance(obj, types.PropertyPath) or
isinstance(obj, types.ManagedMethod) or
isinstance(obj, types.datetime) or
isinstance(obj, types.URI) or isinstance(obj, type))
class Differ:
"""Class for comparing two Objects"""
def __init__(self, looseMatch=False, ignoreArrayOrder=True):
self._looseMatch = looseMatch
self._ignoreArrayOrder = ignoreArrayOrder
def DiffAnyObjects(self, oldObj, newObj, isObjLink=False):
"""Diff any two Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
__Log__.debug('DiffAnyObjects: One of the objects is unset.')
return self._looseMatch
oldObjInstance = oldObj
newObjInstance = newObj
if isinstance(oldObj, list):
oldObjInstance = oldObj[0]
if isinstance(newObj, list):
newObjInstance = newObj[0]
# Need to see if it is a primitive type first since type information
# will not be available for them.
if (IsPrimitiveType(oldObj) and IsPrimitiveType(newObj)
and oldObj.__class__.__name__ == newObj.__class__.__name__):
if oldObj == newObj:
return True
elif oldObj == None or newObj == None:
            __Log__.debug('DiffAnyObjects: One of the objects is None')
return False
oldType = Type(oldObjInstance)
newType = Type(newObjInstance)
if oldType != newType:
__Log__.debug('DiffAnyObjects: Types do not match %s != %s' %
(repr(GetWsdlName(oldObjInstance.__class__)),
repr(GetWsdlName(newObjInstance.__class__))))
return False
elif isinstance(oldObj, list):
return self.DiffArrayObjects(oldObj, newObj, isObjLink)
elif isinstance(oldObjInstance, types.ManagedObject):
return (not oldObj and not newObj) or (oldObj and newObj
and oldObj._moId == newObj._moId)
elif isinstance(oldObjInstance, types.DataObject):
if isObjLink:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffAnyObjects: Keys do not match %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
return bMatch
return self.DiffDataObjects(oldObj, newObj)
else:
raise TypeError("Unknown type: "+repr(GetWsdlName(oldObj.__class__)))
def DiffDoArrays(self, oldObj, newObj, isElementLinks):
"""Diff two DataObject arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if isElementLinks:
if i.GetKey() != j.GetKey():
__Log__.debug('DiffDoArrays: Keys do not match %s != %s'
% (i.GetKey(), j.GetKey()))
return False
else:
if not self.DiffDataObjects(i, j):
__Log__.debug(
                        'DiffDoArrays: one of the elements does not match')
return False
return True
def DiffAnyArrays(self, oldObj, newObj, isElementLinks):
"""Diff two arrays which contain Any objects"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffAnyArrays: Array lengths do not match. %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if not self.DiffAnyObjects(i, j, isElementLinks):
                __Log__.debug('DiffAnyArrays: One of the elements does not match.')
return False
return True
def DiffPrimitiveArrays(self, oldObj, newObj):
"""Diff two primitive arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
match = True
if self._ignoreArrayOrder:
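            # Lengths already match, so comparing frozensets ignores ordering
            # (duplicate elements collapse to a single member).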
oldSet = oldObj and frozenset(oldObj) or frozenset()
newSet = newObj and frozenset(newObj) or frozenset()
match = (oldSet == newSet)
else:
for i, j in zip(oldObj, newObj):
if i != j:
match = False
break
if not match:
__Log__.debug(
                'DiffPrimitiveArrays: One of the elements does not match.')
return False
return True
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False):
"""Method which deligates the diffing of arrays based on the type"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
return False
if len(oldObj) != len(newObj):
__Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
firstObj = oldObj[0]
if IsPrimitiveType(firstObj):
return self.DiffPrimitiveArrays(oldObj, newObj)
elif isinstance(firstObj, types.ManagedObject):
return self.DiffAnyArrays(oldObj, newObj, isElementLinks)
elif isinstance(firstObj, types.DataObject):
return self.DiffDoArrays(oldObj, newObj, isElementLinks)
else:
raise TypeError("Unknown type: %s" % oldObj.__class__)
def DiffDataObjects(self, oldObj, newObj):
"""Diff Data Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
            __Log__.debug('DiffDataObjects: One of the objects is None')
return False
oldType = Type(oldObj)
newType = Type(newObj)
if oldType != newType:
__Log__.debug(
'DiffDataObjects: Types do not match for dataobjects. %s != %s'
% (oldObj._wsdlName, newObj._wsdlName))
return False
for prop in oldObj._GetPropertyList():
oldProp = getattr(oldObj, prop.name)
newProp = getattr(newObj, prop.name)
propType = oldObj._GetPropertyInfo(prop.name).type
if not oldProp and not newProp:
continue
elif ((prop.flags & VmomiSupport.F_OPTIONAL) and
self._looseMatch and (not newProp or not oldProp)):
continue
elif not oldProp or not newProp:
__Log__.debug(
'DiffDataObjects: One of the objects has property %s unset'
% prop.name)
return False
bMatch = True
if IsPrimitiveType(oldProp):
bMatch = oldProp == newProp
elif isinstance(oldProp, types.ManagedObject):
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, types.DataObject):
if prop.flags & VmomiSupport.F_LINK:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffDataObjects: Key match failed %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
else:
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, list):
bMatch = self.DiffArrayObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
else:
raise TypeError("Unknown type: "+repr(propType))
if not bMatch:
__Log__.debug('DiffDataObjects: Objects differ in property %s'
% prop.name)
return False
return True
def DiffAnys(obj1, obj2, looseMatch=False, ignoreArrayOrder=True):
"""Diff any two objects. Objects can either be primitive type
or DataObjects"""
differ = Differ(looseMatch = looseMatch, ignoreArrayOrder = ignoreArrayOrder)
return differ.DiffAnyObjects(obj1, obj2)
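# Hypothetical usage sketch (not part of the original module; variable names
# are placeholders): comparing two data objects while tolerating unset
# optional properties and ignoring array ordering.
#
#   if not DiffAnys(oldConfig, newConfig, looseMatch=True, ignoreArrayOrder=True):
#       __Log__.debug('configuration has changed')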
|
from pmsensor import co2sensor
from pmsensor.co2sensor import read_mh_z19_with_temperature
import homeassistant.components.mhz19.sensor as mhz19
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import DEFAULT, Mock, patch
from tests.common import assert_setup_component
async def test_setup_missing_config(hass):
"""Test setup with configuration missing required entries."""
with assert_setup_component(0):
assert await async_setup_component(
hass, DOMAIN, {"sensor": {"platform": "mhz19"}}
)
@patch("pmsensor.co2sensor.read_mh_z19", side_effect=OSError("test error"))
async def test_setup_failed_connect(mock_co2, hass):
"""Test setup when connection error occurs."""
assert not mhz19.setup_platform(
hass,
{"platform": "mhz19", mhz19.CONF_SERIAL_DEVICE: "test.serial"},
None,
)
async def test_setup_connected(hass):
"""Test setup when connection succeeds."""
with patch.multiple(
"pmsensor.co2sensor",
read_mh_z19=DEFAULT,
read_mh_z19_with_temperature=DEFAULT,
):
read_mh_z19_with_temperature.return_value = None
mock_add = Mock()
assert mhz19.setup_platform(
hass,
{
"platform": "mhz19",
"monitored_conditions": ["co2", "temperature"],
mhz19.CONF_SERIAL_DEVICE: "test.serial",
},
mock_add,
)
assert mock_add.call_count == 1
@patch(
"pmsensor.co2sensor.read_mh_z19_with_temperature",
side_effect=OSError("test error"),
)
async def test_client_update_oserror(mock_function):
"""Test MHZClient when library throws OSError."""
client = mhz19.MHZClient(co2sensor, "test.serial")
client.update()
assert {} == client.data
@patch("pmsensor.co2sensor.read_mh_z19_with_temperature", return_value=(5001, 24))
async def test_client_update_ppm_overflow(mock_function):
"""Test MHZClient when ppm is too high."""
client = mhz19.MHZClient(co2sensor, "test.serial")
client.update()
assert client.data.get("co2") is None
@patch("pmsensor.co2sensor.read_mh_z19_with_temperature", return_value=(1000, 24))
async def test_client_update_good_read(mock_function):
"""Test MHZClient when the read succeeds."""
client = mhz19.MHZClient(co2sensor, "test.serial")
client.update()
assert {"temperature": 24, "co2": 1000} == client.data
@patch("pmsensor.co2sensor.read_mh_z19_with_temperature", return_value=(1000, 24))
async def test_co2_sensor(mock_function):
"""Test CO2 sensor."""
client = mhz19.MHZClient(co2sensor, "test.serial")
sensor = mhz19.MHZ19Sensor(client, mhz19.SENSOR_CO2, None, "name")
sensor.update()
assert sensor.name == "name: CO2"
assert sensor.state == 1000
assert sensor.unit_of_measurement == CONCENTRATION_PARTS_PER_MILLION
assert sensor.should_poll
assert sensor.device_state_attributes == {"temperature": 24}
@patch("pmsensor.co2sensor.read_mh_z19_with_temperature", return_value=(1000, 24))
async def test_temperature_sensor(mock_function):
"""Test temperature sensor."""
client = mhz19.MHZClient(co2sensor, "test.serial")
sensor = mhz19.MHZ19Sensor(client, mhz19.SENSOR_TEMPERATURE, None, "name")
sensor.update()
assert sensor.name == "name: Temperature"
assert sensor.state == 24
assert sensor.unit_of_measurement == TEMP_CELSIUS
assert sensor.should_poll
assert sensor.device_state_attributes == {"co2_concentration": 1000}
@patch("pmsensor.co2sensor.read_mh_z19_with_temperature", return_value=(1000, 24))
async def test_temperature_sensor_f(mock_function):
"""Test temperature sensor."""
client = mhz19.MHZClient(co2sensor, "test.serial")
sensor = mhz19.MHZ19Sensor(
client, mhz19.SENSOR_TEMPERATURE, TEMP_FAHRENHEIT, "name"
)
sensor.update()
assert sensor.state == 75.2
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
#pylint: skip-file
""" This is very old code, and no longer runs due to reorgization of the
UKF code"""
import numpy as np
import scipy.linalg as linalg
import matplotlib.pyplot as plt
from GetRadar import get_radar
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.common import Q_discrete_white_noise
def fx(x, dt):
""" state transition function for sstate [downrange, vel, altitude]"""
F = np.array([[1., dt, 0.],
[0., 1., 0.],
[0., 0., 1.]])
return np.dot(F, x)
def hx(x):
""" returns slant range based on downrange distance and altitude"""
return (x[0]**2 + x[2]**2)**.5
if __name__ == "__main__":
dt = 0.05
radarUKF = UKF(dim_x=3, dim_z=1, dt=dt, kappa=0.)
radarUKF.Q *= Q_discrete_white_noise(3, 1, .01)
radarUKF.R *= 10
radarUKF.x = np.array([0., 90., 1100.])
radarUKF.P *= 100.
t = np.arange(0, 20+dt, dt)
n = len(t)
xs = []
rs = []
for i in range(n):
r = get_radar(dt)
rs.append(r)
radarUKF.update(r, hx, fx)
xs.append(radarUKF.x)
xs = np.asarray(xs)
plt.subplot(311)
plt.plot(t, xs[:, 0])
plt.title('distance')
plt.subplot(312)
plt.plot(t, xs[:, 1])
plt.title('velocity')
plt.subplot(313)
plt.plot(t, xs[:, 2])
plt.title('altitude')
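# Rough sketch (not part of the original script) of how the same filter might
# be built with the current filterpy API, where the sigma-point object and the
# fx/hx functions are passed to the constructor; verify the names against the
# installed filterpy version before relying on this.
#
#   from filterpy.kalman import MerweScaledSigmaPoints
#   points = MerweScaledSigmaPoints(n=3, alpha=0.1, beta=2.0, kappa=0.0)
#   ukf = UKF(dim_x=3, dim_z=1, dt=dt, fx=fx, hx=hx, points=points)
#   ukf.predict()
#   ukf.update(r)
#
# (When running as a script, a final plt.show() call is needed to display the
# plots above.)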
|
import logging
from pyinsteon import devices
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from .const import (
DOMAIN,
SIGNAL_ADD_DEFAULT_LINKS,
SIGNAL_LOAD_ALDB,
SIGNAL_PRINT_ALDB,
SIGNAL_REMOVE_ENTITY,
SIGNAL_SAVE_DEVICES,
STATE_NAME_LABEL_MAP,
)
from .utils import print_aldb_to_log
_LOGGER = logging.getLogger(__name__)
class InsteonEntity(Entity):
"""INSTEON abstract base entity."""
def __init__(self, device, group):
"""Initialize the INSTEON binary sensor."""
self._insteon_device_group = device.groups[group]
self._insteon_device = device
def __hash__(self):
"""Return the hash of the Insteon Entity."""
return hash(self._insteon_device)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def address(self):
"""Return the address of the node."""
return str(self._insteon_device.address)
@property
def group(self):
"""Return the INSTEON group that the entity responds to."""
return self._insteon_device_group.group
@property
def unique_id(self) -> str:
"""Return a unique ID."""
if self._insteon_device_group.group == 0x01:
uid = self._insteon_device.id
else:
uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}"
return uid
@property
def name(self):
"""Return the name of the node (used for Entity_ID)."""
# Set a base description
description = self._insteon_device.description
if description is None:
description = "Unknown Device"
# Get an extension label if there is one
extension = self._get_label()
if extension:
extension = f" {extension}"
return f"{description} {self._insteon_device.address}{extension}"
@property
def device_state_attributes(self):
"""Provide attributes for display on device card."""
return {"insteon_address": self.address, "insteon_group": self.group}
@property
def device_info(self):
"""Return device information."""
return {
"identifiers": {(DOMAIN, str(self._insteon_device.address))},
"name": f"{self._insteon_device.description} {self._insteon_device.address}",
"model": f"{self._insteon_device.model} (0x{self._insteon_device.cat:02x}, 0x{self._insteon_device.subcat:02x})",
"sw_version": f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}",
"manufacturer": "Smart Home",
"via_device": (DOMAIN, str(devices.modem.address)),
}
@callback
def async_entity_update(self, name, address, value, group):
"""Receive notification from transport that new data exists."""
_LOGGER.debug(
"Received update for device %s group %d value %s",
address,
group,
value,
)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register INSTEON update events."""
_LOGGER.debug(
"Tracking updates for device %s group %d name %s",
self.address,
self.group,
self._insteon_device_group.name,
)
self._insteon_device_group.subscribe(self.async_entity_update)
load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}"
self.async_on_remove(
async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb)
)
print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}"
async_dispatcher_connect(self.hass, print_signal, self._print_aldb)
default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}"
async_dispatcher_connect(
self.hass, default_links_signal, self._async_add_default_links
)
remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}"
self.async_on_remove(
async_dispatcher_connect(self.hass, remove_signal, self.async_remove)
)
async def async_will_remove_from_hass(self):
"""Unsubscribe to INSTEON update events."""
_LOGGER.debug(
"Remove tracking updates for device %s group %d name %s",
self.address,
self.group,
self._insteon_device_group.name,
)
self._insteon_device_group.unsubscribe(self.async_entity_update)
async def _async_read_aldb(self, reload):
"""Call device load process and print to log."""
await self._insteon_device.aldb.async_load(refresh=reload)
self._print_aldb()
async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES)
def _print_aldb(self):
"""Print the device ALDB to the log file."""
print_aldb_to_log(self._insteon_device.aldb)
def _get_label(self):
"""Get the device label for grouped devices."""
label = ""
if len(self._insteon_device.groups) > 1:
if self._insteon_device_group.name in STATE_NAME_LABEL_MAP:
label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name]
else:
label = f"Group {self.group:d}"
return label
async def _async_add_default_links(self):
"""Add default links between the device and the modem."""
await self._insteon_device.async_add_default_links()
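# Minimal sketch (an assumption, not part of this module) of how a platform
# entity might build on InsteonEntity; the `value` attribute on the pyinsteon
# group object is assumed here.
#
#   class InsteonOnOffEntity(InsteonEntity):
#       """Hypothetical on/off entity backed by an Insteon group."""
#
#       @property
#       def is_on(self):
#           return bool(self._insteon_device_group.value)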
|
import json
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import redshift
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
READY_STATUSES = ['available']
SNAPSHOT_READY_STATUSES = ['completed']
def AddTags(resource_arn, region):
"""Adds tags to a Redshift cluster created by PerfKitBenchmarker.
Args:
resource_arn: The arn of AWS resource to operate on.
region: The AWS region resource was created in.
"""
cmd_prefix = util.AWS_PREFIX
tag_cmd = cmd_prefix + ['redshift', 'create-tags', '--region=%s' % region,
'--resource-name', resource_arn, '--tags']
tag_cmd += util.MakeFormattedDefaultTags()
vm_util.IssueCommand(tag_cmd)
class AddingIAMRole(object):
"""IAM Role to associate with the cluster.
IAM Role can be associated with the cluster to access to other services such
as S3.
Attributes:
cluster_identifier: Identifier of the cluster
iam_role_name: Role name of the IAM
"""
def __init__(self, cluster_identifier, iam_role_name, cmd_prefix):
self.cmd_prefix = cmd_prefix
self.cluster_identifier = cluster_identifier
self.iam_role_name = iam_role_name
cmd = self.cmd_prefix + ['redshift',
'modify-cluster-iam-roles',
'--cluster-identifier',
self.cluster_identifier,
'--add-iam-roles',
self.iam_role_name]
vm_util.IssueCommand(cmd)
class Spectrum(redshift.Redshift):
"""Object representing a Spectrum cluster.
Attributes:
cluster_id: ID of the cluster.
project: ID of the project.
"""
SERVICE_TYPE = 'spectrum'
def __init__(self, edw_service_spec):
super(Spectrum, self).__init__(edw_service_spec)
# Cluster setup attributes
self.iam_role = edw_service_spec.iam_role
def _IsReady(self):
"""Method to return if the cluster is ready to handle queries."""
return self._IsClusterReady() and self._IsSnapshotRestored()
def _IsClusterReady(self):
"""Method to return if the cluster is ready."""
stdout, _, _ = self.__DescribeCluster()
return json.loads(stdout)['Clusters'][0]['ClusterStatus'] in READY_STATUSES
def __DescribeCluster(self):
"""Describe a spectrum cluster."""
cmd = self.cmd_prefix + ['redshift', 'describe-clusters',
'--cluster-identifier', self.cluster_identifier]
return vm_util.IssueCommand(cmd)
def _IsSnapshotRestored(self):
"""Method to return if the cluster snapshot is completed restoring."""
stdout, _, _ = self.__DescribeCluster()
return (json.loads(stdout)['Clusters'][0]['RestoreStatus']['Status'] in
SNAPSHOT_READY_STATUSES)
def _PostCreate(self):
"""Perform general post create operations on the cluster.
Get the endpoint to be used for interacting with the cluster and apply
tags on the cluster.
"""
@vm_util.Retry(poll_interval=self.POLL_INTERVAL, fuzz=0,
timeout=self.READY_TIMEOUT,
retryable_exceptions=(
errors.Resource.RetryableCreationError,))
def WaitUntilReady():
if not self._IsReady():
raise errors.Resource.RetryableCreationError('Adding IAM Role')
stdout, _, _ = self.__DescribeCluster()
self.adding_iam_role = None
if self.iam_role is not None:
self.adding_iam_role = AddingIAMRole(self.cluster_identifier,
self.iam_role,
self.cmd_prefix)
WaitUntilReady()
stdout, _, _ = self.__DescribeCluster()
self.endpoint = json.loads(stdout)['Clusters'][0]['Endpoint']['Address']
account = util.GetAccount()
self.arn = 'arn:aws:redshift:{}:{}:cluster:{}'.format(
    self.region, account, self.cluster_identifier)
AddTags(self.arn, self.region)
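# Illustrative note (placeholder values, not executed): with region
# 'us-east-1', account '123456789012' and cluster identifier 'pkb-spectrum',
# the ARN assembled above would read
#
#   arn:aws:redshift:us-east-1:123456789012:cluster:pkb-spectrum
#
# and is then passed to AddTags() so the default PerfKitBenchmarker tags are
# attached via `aws redshift create-tags`.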
|
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CoreState, State, callback
from tests.common import mock_restore_cache
from tests.components.rflink.test_init import mock_rflink
DOMAIN = "light"
CONFIG = {
"rflink": {
"port": "/dev/ttyABC0",
"ignore_devices": ["ignore_wildcard_*", "ignore_light"],
},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]},
"dimmable_0_0": {"name": "dim_test", "type": "dimmable"},
"switchable_0_0": {"name": "switch_test", "type": "switchable"},
},
},
}
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the RFLink switch component."""
# setup mocking rflink module
event_callback, create, protocol, _ = await mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch
)
# make sure arguments are passed
assert create.call_args_list[0][1]["ignore"]
# test default state of light loaded from config
light_initial = hass.states.get(f"{DOMAIN}.test")
assert light_initial.state == "off"
assert light_initial.attributes["assumed_state"]
# light should follow state of the hardware device by interpreting
# incoming events for its name and aliases
# mock incoming command event for this device
event_callback({"id": "protocol_0_0", "command": "on"})
await hass.async_block_till_done()
light_after_first_command = hass.states.get(f"{DOMAIN}.test")
assert light_after_first_command.state == "on"
# also, after receiving the first command the state no longer has to be assumed
assert not light_after_first_command.attributes.get("assumed_state")
# mock incoming command event for this device
event_callback({"id": "protocol_0_0", "command": "off"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# should respond to group command
event_callback({"id": "protocol_0_0", "command": "allon"})
await hass.async_block_till_done()
light_after_first_command = hass.states.get(f"{DOMAIN}.test")
assert light_after_first_command.state == "on"
# should respond to group command
event_callback({"id": "protocol_0_0", "command": "alloff"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test following aliases
# mock incoming command event for this device alias
event_callback({"id": "test_alias_0_0", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "on"
# test event for new unconfigured sensor
event_callback({"id": "protocol2_0_1", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.protocol2_0_1").state == "on"
# test changing state from HA propagates to RFLink
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "off"
assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[0][0][1] == "off"
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "on"
assert protocol.send_command_ack.call_args_list[1][0][1] == "on"
# protocols supporting dimming and on/off should create hybrid light entity
event_callback({"id": "newkaku_0_1", "command": "off"})
await hass.async_block_till_done()
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_0_1"}
)
)
await hass.async_block_till_done()
# dimmable should send highest dim level when turning on
assert protocol.send_command_ack.call_args_list[2][0][1] == "15"
# and send on command for fallback
assert protocol.send_command_ack.call_args_list[3][0][1] == "on"
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: f"{DOMAIN}.newkaku_0_1", ATTR_BRIGHTNESS: 128},
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[4][0][1] == "7"
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: f"{DOMAIN}.dim_test", ATTR_BRIGHTNESS: 128},
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[5][0][1] == "7"
async def test_firing_bus_event(hass, monkeypatch):
"""Incoming RFLink command events should be put on the HA event bus."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {
"name": "test",
"aliases": ["test_alias_0_0"],
"fire_event": True,
}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
calls = []
@callback
def listener(event):
calls.append(event)
hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)
# test event for new unconfigured sensor
event_callback({"id": "protocol_0_0", "command": "off"})
await hass.async_block_till_done()
await hass.async_block_till_done()
assert calls[0].data == {"state": "off", "entity_id": f"{DOMAIN}.test"}
async def test_signal_repetitions(hass, monkeypatch):
"""Command should be sent amount of configured repetitions."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"device_defaults": {"signal_repetitions": 3},
"devices": {
"protocol_0_0": {"name": "test", "signal_repetitions": 2},
"protocol_0_1": {"name": "test1"},
"newkaku_0_1": {"type": "hybrid"},
},
},
}
# setup mocking rflink module
event_callback, _, protocol, _ = await mock_rflink(
hass, config, DOMAIN, monkeypatch
)
# test if signal repetition is performed according to configuration
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
)
# wait for commands and repetitions to finish
await hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 2
# test if default apply to configured devices
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test1"}
)
)
# wait for commands and repetitions to finish
await hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 5
# test if device defaults apply to newly created devices
event_callback({"id": "protocol_0_2", "command": "off"})
# make sure entity is created before setting state
await hass.async_block_till_done()
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.protocol_0_2"}
)
)
# wait for commands and repetitions to finish
await hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 8
async def test_signal_repetitions_alternation(hass, monkeypatch):
"""Simultaneously switching entities must alternate repetitions."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "signal_repetitions": 2},
"protocol_0_1": {"name": "test1", "signal_repetitions": 2},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test1"}
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[1][0][0] == "protocol_0_1"
assert protocol.send_command_ack.call_args_list[2][0][0] == "protocol_0_0"
assert protocol.send_command_ack.call_args_list[3][0][0] == "protocol_0_1"
async def test_signal_repetitions_cancelling(hass, monkeypatch):
"""Cancel outstanding repetitions when state changed."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"protocol_0_0": {"name": "test", "signal_repetitions": 3}},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}
)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: f"{DOMAIN}.test"}, blocking=True
)
assert [call[0][1] for call in protocol.send_command_ack.call_args_list] == [
"off",
"on",
"on",
"on",
]
async def test_type_toggle(hass, monkeypatch):
"""Test toggle type lights (on/on)."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"toggle_0_0": {"name": "toggle_test", "type": "toggle"}},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
# default value = 'off'
assert hass.states.get(f"{DOMAIN}.toggle_test").state == "off"
# test sending 'on' command, must set state = 'on'
event_callback({"id": "toggle_0_0", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.toggle_test").state == "on"
# test sending 'on' command again, must set state = 'off'
event_callback({"id": "toggle_0_0", "command": "on"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.toggle_test").state == "off"
# test async_turn_off, must set state = 'on' ('off' + toggle)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: f"{DOMAIN}.toggle_test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.toggle_test").state == "on"
# test async_turn_on, must set state = 'off' (yes, sounds crazy)
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: f"{DOMAIN}.toggle_test"}
)
)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.toggle_test").state == "off"
async def test_group_alias(hass, monkeypatch):
"""Group aliases should only respond to group commands (allon/alloff)."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "group_aliases": ["test_group_0_0"]}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test sending group command to group alias
event_callback({"id": "test_group_0_0", "command": "allon"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "on"
# test sending non-group command to group alias (should not respond)
event_callback({"id": "test_group_0_0", "command": "off"})
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.test").state == "on"
async def test_nogroup_alias(hass, monkeypatch):
"""Non group aliases should not respond to group commands."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"protocol_0_0": {
"name": "test",
"nogroup_aliases": ["test_nogroup_0_0"],
}
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test sending group command to nogroup alias
event_callback({"id": "test_nogroup_0_0", "command": "allon"})
await hass.async_block_till_done()
# should not affect state
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test sending non-group command to nogroup alias
event_callback({"id": "test_nogroup_0_0", "command": "on"})
await hass.async_block_till_done()
# should affect state
assert hass.states.get(f"{DOMAIN}.test").state == "on"
async def test_nogroup_device_id(hass, monkeypatch):
"""Device id that do not respond to group commands (allon/alloff)."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"test_nogroup_0_0": {"name": "test", "group": False}},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test sending group command to nogroup
event_callback({"id": "test_nogroup_0_0", "command": "allon"})
await hass.async_block_till_done()
# should not affect state
assert hass.states.get(f"{DOMAIN}.test").state == "off"
# test sending non-group command to nogroup device
event_callback({"id": "test_nogroup_0_0", "command": "on"})
await hass.async_block_till_done()
# should affect state
assert hass.states.get(f"{DOMAIN}.test").state == "on"
async def test_disable_automatic_add(hass, monkeypatch):
"""If disabled new devices should not be automatically added."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {"platform": "rflink", "automatic_add": False},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
# test event for new unconfigured sensor
event_callback({"id": "protocol_0_0", "command": "off"})
await hass.async_block_till_done()
# make sure new device is not added
assert not hass.states.get(f"{DOMAIN}.protocol_0_0")
async def test_restore_state(hass, monkeypatch):
"""Ensure states are restored on startup."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {
"NewKaku_12345678_0": {"name": "l1", "type": "hybrid"},
"test_restore_2": {"name": "l2"},
"test_restore_3": {"name": "l3"},
"test_restore_4": {"name": "l4", "type": "dimmable"},
"test_restore_5": {"name": "l5", "type": "dimmable"},
},
},
}
mock_restore_cache(
hass,
(
State(f"{DOMAIN}.l1", STATE_ON, {ATTR_BRIGHTNESS: "123"}),
State(f"{DOMAIN}.l2", STATE_ON, {ATTR_BRIGHTNESS: "321"}),
State(f"{DOMAIN}.l3", STATE_OFF),
State(f"{DOMAIN}.l5", STATE_ON, {ATTR_BRIGHTNESS: "222"}),
),
)
hass.state = CoreState.starting
# setup mocking rflink module
_, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
# hybrid light must restore brightness
state = hass.states.get(f"{DOMAIN}.l1")
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 123
# normal light must NOT restore brightness
state = hass.states.get(f"{DOMAIN}.l2")
assert state
assert state.state == STATE_ON
assert not state.attributes.get(ATTR_BRIGHTNESS)
# OFF state is also restored
state = hass.states.get(f"{DOMAIN}.l3")
assert state
assert state.state == STATE_OFF
# a light without a cached state must get default values
state = hass.states.get(f"{DOMAIN}.l4")
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_BRIGHTNESS] == 255
assert state.attributes["assumed_state"]
# test coverage for dimmable light
state = hass.states.get(f"{DOMAIN}.l5")
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 222
|
import unittest
from mock import Mock, call
from datetime import datetime
from trashcli.put import GlobalTrashCan
import os
class TestTopDirRules:
def test(self):
parent_path = lambda _:None
volume_of = lambda _:'/volume'
realpath = lambda _: None
fs = Mock(['move',
'atomic_write',
'remove_file',
'ensure_dir',
'isdir',
'islink',
'has_sticky_bit'])
fs.islink.side_effect = lambda path: {
'/volume/.Trash':False
}[path]
fs.has_sticky_bit.side_effect = lambda path: {
'/volume/.Trash':False
}[path]
reporter = Mock(['volume_of_file',
'found_unsecure_trash_dir_unsticky',
'trash_dir_with_volume',
'file_has_been_trashed_in_as'])
trashcan = GlobalTrashCan({},
volume_of,
reporter,
fs,
lambda :'uid',
datetime.now,
parent_path,
realpath,
Mock())
trashcan.trash('')
assert [
call('', '/volume/.Trash-uid')
] == reporter.file_has_been_trashed_in_as.mock_calls
class TestGlobalTrashCan(unittest.TestCase):
def setUp(self):
self.reporter = Mock()
self.fs = Mock()
self.volume_of = Mock()
self.volume_of.return_value = '/'
self.trashcan = GlobalTrashCan(
volume_of = self.volume_of,
reporter = self.reporter,
getuid = lambda:123,
now = datetime.now,
environ = dict(),
fs = self.fs,
parent_path = os.path.dirname,
realpath = lambda x:x,
logger = Mock())
def test_log_volume(self):
self.trashcan.trash('a-dir/with-a-file')
self.reporter.volume_of_file.assert_called_with('/')
def test_should_report_when_trash_fail(self):
self.fs.move.side_effect = IOError
self.trashcan.trash('non-existent')
self.reporter.unable_to_trash_file.assert_called_with('non-existent')
def test_should_not_delete_a_dot_entry(self):
self.trashcan.trash('.')
self.reporter.unable_to_trash_dot_entries.assert_called_with('.')
def test_bug(self):
self.fs.mock_add_spec([
'move',
'atomic_write',
'remove_file',
'ensure_dir',
'isdir',
'islink',
'has_sticky_bit',
], True)
self.fs.islink.side_effect = (lambda path: { '/.Trash':False }[path])
self.volume_of.side_effect = (lambda path: {
'/foo': '/',
'': '/',
'/.Trash/123': '/',
}[path])
self.trashcan.trash('foo')
def test_what_happen_when_trashing_with_trash_dir(self):
from trashcli.put import TrashDirectoryForPut
fs = Mock()
now = Mock()
fs.mock_add_spec([
'move', 'atomic_write', 'remove_file', 'ensure_dir',
], True)
from unittest import SkipTest
raise SkipTest("")
trash_dir = TrashDirectoryForPut('/path', '/volume', fs)
trash_dir.trash('garbage', now)
|
from Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
"""
Implements the abstract Handler class, sending data the same way that
gmetric does.
"""
def __init__(self, config=None):
"""
Create a new instance of the GmetricHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if gmetric is None:
logging.error("Failed to load gmetric module")
return
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.port = int(self.config['port'])
self.protocol = self.config['protocol']
if not self.protocol:
self.protocol = 'udp'
# Initialize
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GmetricHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'protocol': 'udp or tcp',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GmetricHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8651,
'protocol': 'udp',
})
return config
def __del__(self):
"""
Destroy instance of the GmetricHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to a gmond instance
"""
# Just send the data as a string
self._send(metric)
def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = "60"
dmax = "0"
slope = "both"
# FIXME: Badness, shouldn't *assume* double type
metric_type = "double"
units = ""
group = ""
self.gmetric.send(metric_name,
metric.value,
metric_type,
units,
slope,
tmax,
dmax,
group)
def _close(self):
"""
Close the connection
"""
self.gmetric = None
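# Example configuration sketch (values are placeholders): the keys mirror
# get_default_config() above, so a handler section in diamond.conf might look
# roughly like:
#
#   [[GmetricHandler]]
#   host = ganglia.example.com
#   port = 8651
#   protocol = udp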
|
from homeassistant.components.vacuum import STATE_DOCKED
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.icon import icon_for_battery_level
from .const import BLID, DOMAIN, ROOMBA_SESSION
from .irobot_base import IRobotEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the iRobot Roomba vacuum cleaner."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
roomba = domain_data[ROOMBA_SESSION]
blid = domain_data[BLID]
roomba_vac = RoombaBattery(roomba, blid)
async_add_entities([roomba_vac], True)
class RoombaBattery(IRobotEntity):
"""Class to hold Roomba Sensor basic info."""
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Battery Level"
@property
def unique_id(self):
"""Return the ID of this sensor."""
return f"battery_{self._blid}"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return PERCENTAGE
@property
def icon(self):
"""Return the icon for the battery."""
charging = bool(self._robot_state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self._battery_level, charging=charging
)
@property
def state(self):
"""Return the state of the sensor."""
return self._battery_level
|
from traceback import format_exc, print_exc
import docutils.core
import pygal
from docutils.parsers.rst import Directive
from sphinx.directives.code import CodeBlock
# Patch default style
pygal.config.Config.style.value = pygal.style.RotateStyle(
'#2980b9',
background='#fcfcfc',
plot_background='#ffffff',
foreground='#707070',
foreground_strong='#404040',
foreground_subtle='#909090',
opacity='.8',
opacity_hover='.9',
transition='400ms ease-in'
)
class PygalDirective(Directive):
"""Execute the given python file and puts its result in the document."""
required_arguments = 0
optional_arguments = 2
final_argument_whitespace = True
has_content = True
def run(self):
width, height = (map(int, self.arguments[:2])
                 if len(self.arguments) >= 2 else (600, 400))
if len(self.arguments) == 1:
self.render_fix = bool(self.arguments[0])
elif len(self.arguments) == 3:
self.render_fix = bool(self.arguments[2])
else:
self.render_fix = False
self.content = list(self.content)
content = list(self.content)
if self.render_fix:
content[-1] = 'rv = ' + content[-1]
code = '\n'.join(content)
scope = {'pygal': pygal}
try:
exec(code, scope)
except Exception:
print(code)
print_exc()
return [
docutils.nodes.system_message(
'An exception has occurred during code parsing:'
' \n %s' % format_exc(),
type='ERROR',
source='/',
level=3
)
]
if self.render_fix:
rv = scope['rv']
else:
chart = None
for key, value in scope.items():
if isinstance(value, pygal.graph.graph.Graph):
chart = value
self.content.append(key + '.render()')
break
if chart is None:
return [
docutils.nodes.system_message(
'No instance of graph found',
level=3,
type='ERROR',
source='/'
)
]
chart.config.width = width
chart.config.height = height
chart.explicit_size = True
try:
svg = '<embed src="%s" />' % chart.render_data_uri()
except Exception:
return [
docutils.nodes.system_message(
'An exception has occurred during graph generation:'
' \n %s' % format_exc(),
type='ERROR',
source='/',
level=3
)
]
return [docutils.nodes.raw('', svg, format='html')]
class PygalWithCode(PygalDirective):
def run(self):
node_list = super(PygalWithCode, self).run()
node_list.extend(
CodeBlock(
self.name, ['python'], self.options, self.content, self.lineno,
self.content_offset, self.block_text, self.state,
self.state_machine
).run()
)
return [docutils.nodes.compound('', *node_list)]
class PygalTable(Directive):
"""Execute the given python file and puts its result in the document."""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
has_content = True
def run(self):
self.content = list(self.content)
content = list(self.content)
content[-1] = 'rv = ' + content[-1]
code = '\n'.join(content)
scope = {'pygal': pygal}
try:
exec(code, scope)
except Exception:
print_exc()
return [
docutils.nodes.system_message(
'An exception has occurred during code parsing:'
' \n %s' % format_exc(),
type='ERROR',
source='/',
level=3
)
]
rv = scope['rv']
return [docutils.nodes.raw('', rv, format='html')]
class PygalTableWithCode(PygalTable):
def run(self):
node_list = super(PygalTableWithCode, self).run()
node_list.extend(
CodeBlock(
self.name, ['python'], self.options, self.content, self.lineno,
self.content_offset, self.block_text, self.state,
self.state_machine
).run()
)
return [docutils.nodes.compound('', *node_list)]
def setup(app):
app.add_directive('pygal', PygalDirective)
app.add_directive('pygal-code', PygalWithCode)
app.add_directive('pygal-table', PygalTable)
app.add_directive('pygal-table-code', PygalTableWithCode)
return {'version': '1.0.1'}
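# Usage sketch (assumed from the argument handling above): in a Sphinx .rst
# file the directive takes optional width/height arguments and executes its
# Python body, rendering the first pygal graph it finds.
#
#   .. pygal:: 600 400
#
#       chart = pygal.Bar()
#       chart.add('values', [1, 3, 2])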
|
from __future__ import print_function
try:
import cStringIO as stringio
except ImportError:
import io as stringio
import sys
from contextlib import contextmanager
from datetime import datetime as dt
import dateutil.parser
import numpy as np
import pandas
from dateutil.rrule import rrule, DAILY
def dt_or_str_parser(string):
try:
return dateutil.parser.parse(string)
except ValueError:
return string.strip()
def read_str_as_pandas(ts_str, num_index=1):
labels = [x.strip() for x in ts_str.split('\n')[0].split('|')]
pd = pandas.read_csv(stringio.StringIO(ts_str), sep='|', index_col=list(range(num_index)), date_parser=dt_or_str_parser)
# Trim the whitespace on the column names
pd.columns = labels[num_index:]
pd.index.names = labels[0:num_index]
return pd
def get_large_ts(size=2500):
timestamps = list(rrule(DAILY, count=size, dtstart=dt(1970, 1, 1), interval=1))
pd = pandas.DataFrame(index=timestamps, data={'n' + str(i): np.random.random_sample(size) for i in range(size)})
pd.index.name = 'index'
return pd
@contextmanager
def _save_argv():
args = sys.argv[:]
yield
sys.argv = args
def run_as_main(fn, *args):
""" Run a given function as if it was the
system entry point, eg for testing scripts.
Eg::
from scripts.Foo import main
run_as_main(main, 'foo','bar')
This is equivalent to ``Foo foo bar``, assuming
``scripts.Foo.main`` is registered as an entry point.
"""
with _save_argv():
print("run_as_main: %s" % str(args))
sys.argv = ['progname'] + list(args)
return fn()
def multi_index_df_from_arrs(index_headers, index_arrs, data_dict):
parsed_indexes = []
for index in index_arrs:
try:
parsed_indexes.append(pandas.to_datetime(index))
except ValueError:
parsed_indexes.append(index)
m_index = pandas.MultiIndex.from_arrays(parsed_indexes, names=index_headers)
return pandas.DataFrame(data_dict, index=m_index)
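# Example sketch (placeholder data) of the pipe-separated format that
# read_str_as_pandas() above expects: the first `num_index` columns become the
# index and the header row supplies the column labels.
#
#   ts = read_str_as_pandas("""times               | near
#                      2012-09-08 17:06:11.040 |  1.0
#                      2012-10-08 17:06:11.040 |  2.0""")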
|
import diamond.collector
import re
import os
_RE = re.compile(r'(\d+)\s+(\d+)\s+(\d+)')
class FilestatCollector(diamond.collector.Collector):
PROC = '/proc/sys/fs/file-nr'
def get_default_config_help(self):
config_help = super(FilestatCollector, self).get_default_config_help()
config_help.update({
'user_include': "This is list of users to collect data for."
" If this is left empty, its a wildcard"
" to collector for all users"
" (default = None)",
'user_exclude': "This is a list of users to exclude"
" from collecting data. If this is left empty,"
" no specific users will be excluded"
" (default = None)",
'group_include': "This is a list of groups to include"
" in data collection. This DOES NOT"
" override user_exclude."
" (default = None)",
'group_exclude': "This is a list of groups to exclude"
" from collecting data. It DOES NOT override"
" user_include. (default = None)",
'uid_min': "This creates a floor for the user's uid."
" This means that it WILL NOT collect data"
" for any user with a uid LOWER"
" than the specified minimum,"
" unless the user is told to be included"
" by user_include (default = 0)",
'uid_max': "This creates a ceiling for the user's uid."
" This means that it WILL NOT collect data"
" for any user with a uid HIGHER"
" than the specified maximum,"
" unless the user is told to be included"
" by user_include (default = 65536)",
'type_include': "This is a list of file types to collect"
" ('REG', 'DIR', 'FIFO', etc). If left empty,"
" will collect for all file types."
"(Note: it's suggested to not leave"
" type_include empty,"
" as it would add significant load"
" to your graphite box(es) (default = None)",
'type_exclude': "This is a list of tile types to exclude"
" from being collected for. If left empty,"
" no file types will be excluded. (default = None)",
'collect_user_data': "This enables or disables"
" the collection of user specific"
" file handles. (default = False)"
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(FilestatCollector, self).get_default_config()
config.update({
'path': 'files',
'user_include': None,
'user_exclude': None,
'group_include': None,
'group_exclude': None,
'uid_min': 0,
'uid_max': 65536,
'type_include': None,
'type_exclude': None,
'collect_user_data': False
})
return config
def get_userlist(self):
"""
This collects all the users with open files on the system, and filters
based on the variables user_include and user_exclude
"""
# convert user/group lists to arrays if strings
if isinstance(self.config['user_include'], basestring):
self.config['user_include'] = self.config['user_include'].split()
if isinstance(self.config['user_exclude'], basestring):
self.config['user_exclude'] = self.config['user_exclude'].split()
if isinstance(self.config['group_include'], basestring):
self.config['group_include'] = self.config['group_include'].split()
if isinstance(self.config['group_exclude'], basestring):
self.config['group_exclude'] = self.config['group_exclude'].split()
rawusers = os.popen("lsof | awk '{ print $3 }' | sort | uniq -d"
).read().split()
userlist = []
# remove any not on the user include list
if ((self.config['user_include'] is None or
len(self.config['user_include']) == 0)):
userlist = rawusers
else:
# only work with specified include list, which is added at the end
userlist = []
# add any user in the group include list
addedByGroup = []
if ((self.config['group_include'] is not None and
len(self.config['group_include']) > 0)):
for u in rawusers:
self.log.info(u)
# get list of groups of user
user_groups = os.popen("id -Gn %s" % (u)).read().split()
for gi in self.config['group_include']:
if gi in user_groups and u not in userlist:
userlist.append(u)
addedByGroup.append(u)
break
# remove any user in the exclude group list
if ((self.config['group_exclude'] is not None and
len(self.config['group_exclude']) > 0)):
# create tmp list to iterate over while editing userlist
tmplist = userlist[:]
for u in tmplist:
# get list of groups of user
groups = os.popen("id -Gn %s" % (u)).read().split()
for gi in self.config['group_exclude']:
if gi in groups:
userlist.remove(u)
break
# remove any that aren't within the uid limits
# make sure uid_min/max are ints
self.config['uid_min'] = int(self.config['uid_min'])
self.config['uid_max'] = int(self.config['uid_max'])
tmplist = userlist[:]
for u in tmplist:
if ((self.config['user_include'] is None or
u not in self.config['user_include'])):
if u not in addedByGroup:
uid = int(os.popen("id -u %s" % (u)).read())
if ((uid < self.config['uid_min'] and
self.config['uid_min'] is not None and
u in userlist)):
userlist.remove(u)
if ((uid > self.config['uid_max'] and
self.config['uid_max'] is not None and
u in userlist)):
userlist.remove(u)
# add users that are in the users include list
if ((self.config['user_include'] is not None and
len(self.config['user_include']) > 0)):
for u in self.config['user_include']:
if u in rawusers and u not in userlist:
userlist.append(u)
# remove any that is on the user exclude list
if ((self.config['user_exclude'] is not None and
len(self.config['user_exclude']) > 0)):
for u in self.config['user_exclude']:
if u in userlist:
userlist.remove(u)
return userlist
def get_typelist(self):
"""
This collects all available types and applies include/exclude filters
"""
typelist = []
# convert type list into arrays if strings
if isinstance(self.config['type_include'], basestring):
self.config['type_include'] = self.config['type_include'].split()
if isinstance(self.config['type_exclude'], basestring):
self.config['type_exclude'] = self.config['type_exclude'].split()
# remove any not in include list
if self.config['type_include'] is None or len(
self.config['type_include']) == 0:
typelist = os.popen("lsof | awk '{ print $5 }' | sort | uniq -d"
).read().split()
else:
typelist = self.config['type_include']
# remove any in the exclude list
if self.config['type_exclude'] is not None and len(
self.config['type_exclude']) > 0:
for t in self.config['type_exclude']:
if t in typelist:
typelist.remove(t)
return typelist
def process_lsof(self, users, types):
"""
Get the list of users and file types to collect for and collect the
data from lsof
"""
d = {}
for u in users:
d[u] = {}
tmp = os.popen("lsof -wbu %s | awk '{ print $5 }'" % (
u)).read().split()
for t in types:
d[u][t] = tmp.count(t)
return d
def collect(self):
if not os.access(self.PROC, os.R_OK):
return None
# collect total open files
file = open(self.PROC)
for line in file:
match = _RE.match(line)
if match:
self.publish('assigned', int(match.group(1)))
self.publish('unused', int(match.group(2)))
self.publish('max', int(match.group(3)))
file.close()
# collect open files per user per type
if self.config['collect_user_data']:
data = self.process_lsof(self.get_userlist(), self.get_typelist())
for ukey in data.iterkeys():
for tkey in data[ukey].iterkeys():
self.log.debug('files.user.%s.%s %s' % (
ukey, tkey, int(data[ukey][tkey])))
self.publish('user.%s.%s' % (ukey, tkey),
int(data[ukey][tkey]))
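# Example configuration sketch (values are placeholders): the options mirror
# get_default_config() above. Enabling collect_user_data makes the collector
# shell out to lsof per user, so keep the include lists tight, e.g.:
#
#   [[FilestatCollector]]
#   enabled = True
#   collect_user_data = True
#   user_include = www-data, postgres
#   type_include = REG, DIR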
|